@@ -1,2174 +1,2174 @@
|
1 | 1 | # context.py - changeset and file context objects for mercurial |
|
2 | 2 | # |
|
3 | 3 | # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com> |
|
4 | 4 | # |
|
5 | 5 | # This software may be used and distributed according to the terms of the |
|
6 | 6 | # GNU General Public License version 2 or any later version. |
|
7 | 7 | |
|
8 | 8 | from __future__ import absolute_import |
|
9 | 9 | |
|
10 | 10 | import errno |
|
11 | 11 | import os |
|
12 | 12 | import re |
|
13 | 13 | import stat |
|
14 | 14 | |
|
15 | 15 | from .i18n import _ |
|
16 | 16 | from .node import ( |
|
17 | 17 | addednodeid, |
|
18 | 18 | bin, |
|
19 | 19 | hex, |
|
20 | 20 | modifiednodeid, |
|
21 | 21 | nullid, |
|
22 | 22 | nullrev, |
|
23 | 23 | short, |
|
24 | 24 | wdirid, |
|
25 | 25 | wdirnodes, |
|
26 | 26 | ) |
|
27 | 27 | from . import ( |
|
28 | 28 | encoding, |
|
29 | 29 | error, |
|
30 | 30 | fileset, |
|
31 | 31 | match as matchmod, |
|
32 | 32 | mdiff, |
|
33 | 33 | obsolete as obsmod, |
|
34 | 34 | patch, |
|
35 | 35 | phases, |
|
36 | 36 | pycompat, |
|
37 | 37 | repoview, |
|
38 | 38 | revlog, |
|
39 | 39 | scmutil, |
|
40 | 40 | subrepo, |
|
41 | 41 | util, |
|
42 | 42 | ) |
|
43 | 43 | |
|
44 | 44 | propertycache = util.propertycache |
|
45 | 45 | |
|
46 | 46 | nonascii = re.compile(r'[^\x21-\x7f]').search |
|
47 | 47 | |
|
48 | 48 | class basectx(object): |
|
49 | 49 | """A basectx object represents the common logic for its children: |
|
50 | 50 | changectx: read-only context that is already present in the repo, |
|
51 | 51 | workingctx: a context that represents the working directory and can |
|
52 | 52 | be committed, |
|
53 | 53 | memctx: a context that represents changes in-memory and can also |
|
54 | 54 | be committed.""" |
|
55 | 55 | def __new__(cls, repo, changeid='', *args, **kwargs): |
|
56 | 56 | if isinstance(changeid, basectx): |
|
57 | 57 | return changeid |
|
58 | 58 | |
|
59 | 59 | o = super(basectx, cls).__new__(cls) |
|
60 | 60 | |
|
61 | 61 | o._repo = repo |
|
62 | 62 | o._rev = nullrev |
|
63 | 63 | o._node = nullid |
|
64 | 64 | |
|
65 | 65 | return o |
|
66 | 66 | |
|
67 | 67 | def __str__(self): |
|
68 | 68 | r = short(self.node()) |
|
69 | 69 | if pycompat.ispy3: |
|
70 | 70 | return r.decode('ascii') |
|
71 | 71 | return r |
|
72 | 72 | |
|
73 | 73 | def __bytes__(self): |
|
74 | 74 | return short(self.node()) |
|
75 | 75 | |
|
76 | 76 | def __int__(self): |
|
77 | 77 | return self.rev() |
|
78 | 78 | |
|
79 | 79 | def __repr__(self): |
|
80 | 80 | return "<%s %s>" % (type(self).__name__, str(self)) |
|
81 | 81 | |
|
82 | 82 | def __eq__(self, other): |
|
83 | 83 | try: |
|
84 | 84 | return type(self) == type(other) and self._rev == other._rev |
|
85 | 85 | except AttributeError: |
|
86 | 86 | return False |
|
87 | 87 | |
|
88 | 88 | def __ne__(self, other): |
|
89 | 89 | return not (self == other) |
|
90 | 90 | |
|
91 | 91 | def __contains__(self, key): |
|
92 | 92 | return key in self._manifest |
|
93 | 93 | |
|
94 | 94 | def __getitem__(self, key): |
|
95 | 95 | return self.filectx(key) |
|
96 | 96 | |
|
97 | 97 | def __iter__(self): |
|
98 | 98 | return iter(self._manifest) |
|
99 | 99 | |
|
100 | 100 | def _buildstatusmanifest(self, status): |
|
101 | 101 | """Builds a manifest that includes the given status results, if this is |
|
102 | 102 | a working copy context. For non-working copy contexts, it just returns |
|
103 | 103 | the normal manifest.""" |
|
104 | 104 | return self.manifest() |
|
105 | 105 | |
|
106 | 106 | def _matchstatus(self, other, match): |
|
107 | 107 | """return match.always if match is none |
|
108 | 108 | |
|
109 | 109 | This internal method provides a way for child objects to override the |
|
110 | 110 | match operator. |
|
111 | 111 | """ |
|
112 | 112 | return match or matchmod.always(self._repo.root, self._repo.getcwd()) |
|
113 | 113 | |
|
114 | 114 | def _buildstatus(self, other, s, match, listignored, listclean, |
|
115 | 115 | listunknown): |
|
116 | 116 | """build a status with respect to another context""" |
|
117 | 117 | # Load earliest manifest first for caching reasons. More specifically, |
|
118 | 118 | # if you have revisions 1000 and 1001, 1001 is probably stored as a |
|
119 | 119 | # delta against 1000. Thus, if you read 1000 first, we'll reconstruct |
|
120 | 120 | # 1000 and cache it so that when you read 1001, we just need to apply a |
|
121 | 121 | # delta to what's in the cache. So that's one full reconstruction + one |
|
122 | 122 | # delta application. |
|
123 | 123 | mf2 = None |
|
124 | 124 | if self.rev() is not None and self.rev() < other.rev(): |
|
125 | 125 | mf2 = self._buildstatusmanifest(s) |
|
126 | 126 | mf1 = other._buildstatusmanifest(s) |
|
127 | 127 | if mf2 is None: |
|
128 | 128 | mf2 = self._buildstatusmanifest(s) |
|
129 | 129 | |
|
130 | 130 | modified, added = [], [] |
|
131 | 131 | removed = [] |
|
132 | 132 | clean = [] |
|
133 | 133 | deleted, unknown, ignored = s.deleted, s.unknown, s.ignored |
|
134 | 134 | deletedset = set(deleted) |
|
135 | 135 | d = mf1.diff(mf2, match=match, clean=listclean) |
|
136 | 136 | for fn, value in d.iteritems(): |
|
137 | 137 | if fn in deletedset: |
|
138 | 138 | continue |
|
139 | 139 | if value is None: |
|
140 | 140 | clean.append(fn) |
|
141 | 141 | continue |
|
142 | 142 | (node1, flag1), (node2, flag2) = value |
|
143 | 143 | if node1 is None: |
|
144 | 144 | added.append(fn) |
|
145 | 145 | elif node2 is None: |
|
146 | 146 | removed.append(fn) |
|
147 | 147 | elif flag1 != flag2: |
|
148 | 148 | modified.append(fn) |
|
149 | 149 | elif node2 not in wdirnodes: |
|
150 | 150 | # When comparing files between two commits, we save time by |
|
151 | 151 | # not comparing the file contents when the nodeids differ. |
|
152 | 152 | # Note that this means we incorrectly report a reverted change |
|
153 | 153 | # to a file as a modification. |
|
154 | 154 | modified.append(fn) |
|
155 | 155 | elif self[fn].cmp(other[fn]): |
|
156 | 156 | modified.append(fn) |
|
157 | 157 | else: |
|
158 | 158 | clean.append(fn) |
|
159 | 159 | |
|
160 | 160 | if removed: |
|
161 | 161 | # need to filter files if they are already reported as removed |
|
162 | 162 | unknown = [fn for fn in unknown if fn not in mf1 and |
|
163 | 163 | (not match or match(fn))] |
|
164 | 164 | ignored = [fn for fn in ignored if fn not in mf1 and |
|
165 | 165 | (not match or match(fn))] |
|
166 | 166 | # if they're deleted, don't report them as removed |
|
167 | 167 | removed = [fn for fn in removed if fn not in deletedset] |
|
168 | 168 | |
|
169 | 169 | return scmutil.status(modified, added, removed, deleted, unknown, |
|
170 | 170 | ignored, clean) |
|
171 | 171 | |
|
172 | 172 | @propertycache |
|
173 | 173 | def substate(self): |
|
174 | 174 | return subrepo.state(self, self._repo.ui) |
|
175 | 175 | |
|
176 | 176 | def subrev(self, subpath): |
|
177 | 177 | return self.substate[subpath][1] |
|
178 | 178 | |
|
179 | 179 | def rev(self): |
|
180 | 180 | return self._rev |
|
181 | 181 | def node(self): |
|
182 | 182 | return self._node |
|
183 | 183 | def hex(self): |
|
184 | 184 | return hex(self.node()) |
|
185 | 185 | def manifest(self): |
|
186 | 186 | return self._manifest |
|
187 | 187 | def manifestctx(self): |
|
188 | 188 | return self._manifestctx |
|
189 | 189 | def repo(self): |
|
190 | 190 | return self._repo |
|
191 | 191 | def phasestr(self): |
|
192 | 192 | return phases.phasenames[self.phase()] |
|
193 | 193 | def mutable(self): |
|
194 | 194 | return self.phase() > phases.public |
|
195 | 195 | |
|
196 | 196 | def getfileset(self, expr): |
|
197 | 197 | return fileset.getfileset(self, expr) |
|
198 | 198 | |
|
199 | 199 | def obsolete(self): |
|
200 | 200 | """True if the changeset is obsolete""" |
|
201 | 201 | return self.rev() in obsmod.getrevs(self._repo, 'obsolete') |
|
202 | 202 | |
|
203 | 203 | def extinct(self): |
|
204 | 204 | """True if the changeset is extinct""" |
|
205 | 205 | return self.rev() in obsmod.getrevs(self._repo, 'extinct') |
|
206 | 206 | |
|
207 | 207 | def unstable(self): |
|
208 | 208 | """True if the changeset is not obsolete but it's ancestor are""" |
|
209 | 209 | return self.rev() in obsmod.getrevs(self._repo, 'unstable') |
|
210 | 210 | |
|
211 | 211 | def bumped(self): |
|
212 | 212 | """True if the changeset try to be a successor of a public changeset |
|
213 | 213 | |
|
214 | 214 | Only non-public and non-obsolete changesets may be bumped. |
|
215 | 215 | """ |
|
216 | 216 | return self.rev() in obsmod.getrevs(self._repo, 'bumped') |
|
217 | 217 | |
|
218 | 218 | def divergent(self): |
|
219 | 219 | """Is a successors of a changeset with multiple possible successors set |
|
220 | 220 | |
|
221 | 221 | Only non-public and non-obsolete changesets may be divergent. |
|
222 | 222 | """ |
|
223 | 223 | return self.rev() in obsmod.getrevs(self._repo, 'divergent') |
|
224 | 224 | |
|
225 | 225 | def troubled(self): |
|
226 | 226 | """True if the changeset is either unstable, bumped or divergent""" |
|
227 | 227 | return self.unstable() or self.bumped() or self.divergent() |
|
228 | 228 | |
|
229 | 229 | def troubles(self): |
|
230 | 230 | """return the list of troubles affecting this changesets. |
|
231 | 231 | |
|
232 | 232 | Troubles are returned as strings. possible values are: |
|
233 | 233 | - unstable, |
|
234 | 234 | - bumped, |
|
235 | 235 | - divergent. |
|
236 | 236 | """ |
|
237 | 237 | troubles = [] |
|
238 | 238 | if self.unstable(): |
|
239 | 239 | troubles.append('unstable') |
|
240 | 240 | if self.bumped(): |
|
241 | 241 | troubles.append('bumped') |
|
242 | 242 | if self.divergent(): |
|
243 | 243 | troubles.append('divergent') |
|
244 | 244 | return troubles |
|
245 | 245 | |
|
246 | 246 | def parents(self): |
|
247 | 247 | """return contexts for each parent changeset""" |
|
248 | 248 | return self._parents |
|
249 | 249 | |
|
250 | 250 | def p1(self): |
|
251 | 251 | return self._parents[0] |
|
252 | 252 | |
|
253 | 253 | def p2(self): |
|
254 | 254 | parents = self._parents |
|
255 | 255 | if len(parents) == 2: |
|
256 | 256 | return parents[1] |
|
257 | 257 | return changectx(self._repo, nullrev) |
|
258 | 258 | |
|
259 | 259 | def _fileinfo(self, path): |
|
260 | if '_manifest' in self.__dict__: | |
|
260 | if r'_manifest' in self.__dict__: | |
|
261 | 261 | try: |
|
262 | 262 | return self._manifest[path], self._manifest.flags(path) |
|
263 | 263 | except KeyError: |
|
264 | 264 | raise error.ManifestLookupError(self._node, path, |
|
265 | 265 | _('not found in manifest')) |
|
266 | if '_manifestdelta' in self.__dict__ or path in self.files(): | |
|
266 | if r'_manifestdelta' in self.__dict__ or path in self.files(): | |
|
267 | 267 | if path in self._manifestdelta: |
|
268 | 268 | return (self._manifestdelta[path], |
|
269 | 269 | self._manifestdelta.flags(path)) |
|
270 | 270 | mfl = self._repo.manifestlog |
|
271 | 271 | try: |
|
272 | 272 | node, flag = mfl[self._changeset.manifest].find(path) |
|
273 | 273 | except KeyError: |
|
274 | 274 | raise error.ManifestLookupError(self._node, path, |
|
275 | 275 | _('not found in manifest')) |
|
276 | 276 | |
|
277 | 277 | return node, flag |
|
278 | 278 | |
|
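The r'' prefixes added in _fileinfo above are the substance of this change: Mercurial's Python 3 source transformer rewrites plain string literals to bytes, while instance __dict__ keys are always str, so an unprefixed literal would never match. The r'' prefix tells the transformer to leave the literal alone. A minimal sketch of the failure mode (not part of this diff; propertycache paraphrased from util.propertycache):

    class propertycache(object):
        # paraphrase of util.propertycache: cache the computed value
        # under the function's name in the instance __dict__
        def __init__(self, func):
            self.func = func
            self.name = func.__name__
        def __get__(self, obj, type=None):
            result = self.func(obj)
            obj.__dict__[self.name] = result  # key is always a str
            return result

    class ctx(object):
        @propertycache
        def _manifest(self):
            return {}

    c = ctx()
    c._manifest                       # populates c.__dict__['_manifest']
    assert '_manifest' in c.__dict__  # str key: found
    # under Python 3 a byteified literal misses the str key
    # (on Python 2, bytes and str keys coincide, so both forms work):
    assert b'_manifest' not in c.__dict__

The same prefixing is applied to the other __dict__ membership tests later in this diff (_changeid, _filenode, flags).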
279 | 279 | def filenode(self, path): |
|
280 | 280 | return self._fileinfo(path)[0] |
|
281 | 281 | |
|
282 | 282 | def flags(self, path): |
|
283 | 283 | try: |
|
284 | 284 | return self._fileinfo(path)[1] |
|
285 | 285 | except error.LookupError: |
|
286 | 286 | return '' |
|
287 | 287 | |
|
288 | 288 | def sub(self, path, allowcreate=True): |
|
289 | 289 | '''return a subrepo for the stored revision of path, never wdir()''' |
|
290 | 290 | return subrepo.subrepo(self, path, allowcreate=allowcreate) |
|
291 | 291 | |
|
292 | 292 | def nullsub(self, path, pctx): |
|
293 | 293 | return subrepo.nullsubrepo(self, path, pctx) |
|
294 | 294 | |
|
295 | 295 | def workingsub(self, path): |
|
296 | 296 | '''return a subrepo for the stored revision, or wdir if this is a wdir |
|
297 | 297 | context. |
|
298 | 298 | ''' |
|
299 | 299 | return subrepo.subrepo(self, path, allowwdir=True) |
|
300 | 300 | |
|
301 | 301 | def match(self, pats=None, include=None, exclude=None, default='glob', |
|
302 | 302 | listsubrepos=False, badfn=None): |
|
303 | 303 | if pats is None: |
|
304 | 304 | pats = [] |
|
305 | 305 | r = self._repo |
|
306 | 306 | return matchmod.match(r.root, r.getcwd(), pats, |
|
307 | 307 | include, exclude, default, |
|
308 | 308 | auditor=r.nofsauditor, ctx=self, |
|
309 | 309 | listsubrepos=listsubrepos, badfn=badfn) |
|
310 | 310 | |
|
311 | 311 | def diff(self, ctx2=None, match=None, **opts): |
|
312 | 312 | """Returns a diff generator for the given contexts and matcher""" |
|
313 | 313 | if ctx2 is None: |
|
314 | 314 | ctx2 = self.p1() |
|
315 | 315 | if ctx2 is not None: |
|
316 | 316 | ctx2 = self._repo[ctx2] |
|
317 | 317 | diffopts = patch.diffopts(self._repo.ui, opts) |
|
318 | 318 | return patch.diff(self._repo, ctx2, self, match=match, opts=diffopts) |
|
319 | 319 | |
|
320 | 320 | def dirs(self): |
|
321 | 321 | return self._manifest.dirs() |
|
322 | 322 | |
|
323 | 323 | def hasdir(self, dir): |
|
324 | 324 | return self._manifest.hasdir(dir) |
|
325 | 325 | |
|
326 | 326 | def dirty(self, missing=False, merge=True, branch=True): |
|
327 | 327 | return False |
|
328 | 328 | |
|
329 | 329 | def status(self, other=None, match=None, listignored=False, |
|
330 | 330 | listclean=False, listunknown=False, listsubrepos=False): |
|
331 | 331 | """return status of files between two nodes or node and working |
|
332 | 332 | directory. |
|
333 | 333 | |
|
334 | 334 | If other is None, compare this node with working directory. |
|
335 | 335 | |
|
336 | 336 | returns (modified, added, removed, deleted, unknown, ignored, clean) |
|
337 | 337 | """ |
|
338 | 338 | |
|
339 | 339 | ctx1 = self |
|
340 | 340 | ctx2 = self._repo[other] |
|
341 | 341 | |
|
342 | 342 | # This next code block is, admittedly, fragile logic that tests for |
|
343 | 343 | # reversing the contexts and wouldn't need to exist if it weren't for |
|
344 | 344 | # the fast (and common) code path of comparing the working directory |
|
345 | 345 | # with its first parent. |
|
346 | 346 | # |
|
347 | 347 | # What we're aiming for here is the ability to call: |
|
348 | 348 | # |
|
349 | 349 | # workingctx.status(parentctx) |
|
350 | 350 | # |
|
351 | 351 | # If we always built the manifest for each context and compared those, |
|
352 | 352 | # then we'd be done. But the special case of the above call means we |
|
353 | 353 | # just copy the manifest of the parent. |
|
354 | 354 | reversed = False |
|
355 | 355 | if (not isinstance(ctx1, changectx) |
|
356 | 356 | and isinstance(ctx2, changectx)): |
|
357 | 357 | reversed = True |
|
358 | 358 | ctx1, ctx2 = ctx2, ctx1 |
|
359 | 359 | |
|
360 | 360 | match = ctx2._matchstatus(ctx1, match) |
|
361 | 361 | r = scmutil.status([], [], [], [], [], [], []) |
|
362 | 362 | r = ctx2._buildstatus(ctx1, r, match, listignored, listclean, |
|
363 | 363 | listunknown) |
|
364 | 364 | |
|
365 | 365 | if reversed: |
|
366 | 366 | # Reverse added and removed. Clear deleted, unknown and ignored as |
|
367 | 367 | # these make no sense to reverse. |
|
368 | 368 | r = scmutil.status(r.modified, r.removed, r.added, [], [], [], |
|
369 | 369 | r.clean) |
|
370 | 370 | |
|
371 | 371 | if listsubrepos: |
|
372 | 372 | for subpath, sub in scmutil.itersubrepos(ctx1, ctx2): |
|
373 | 373 | try: |
|
374 | 374 | rev2 = ctx2.subrev(subpath) |
|
375 | 375 | except KeyError: |
|
376 | 376 | # A subrepo that existed in node1 was deleted between |
|
377 | 377 | # node1 and node2 (inclusive). Thus, ctx2's substate |
|
378 | 378 | # won't contain that subpath. The best we can do is ignore it. |
|
379 | 379 | rev2 = None |
|
380 | 380 | submatch = matchmod.subdirmatcher(subpath, match) |
|
381 | 381 | s = sub.status(rev2, match=submatch, ignored=listignored, |
|
382 | 382 | clean=listclean, unknown=listunknown, |
|
383 | 383 | listsubrepos=True) |
|
384 | 384 | for rfiles, sfiles in zip(r, s): |
|
385 | 385 | rfiles.extend("%s/%s" % (subpath, f) for f in sfiles) |
|
386 | 386 | |
|
387 | 387 | for l in r: |
|
388 | 388 | l.sort() |
|
389 | 389 | |
|
390 | 390 | return r |
|
391 | 391 | |
|
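For orientation, a hypothetical usage sketch of status() above (assuming a repo object obtained elsewhere); per the docstring, omitting other compares the node against the working directory:

    st = repo['.'].status()                  # '.' vs. working directory
    print(st.modified, st.added, st.removed)

    # the reversed path described in the comments above: a workingctx on
    # the left, a changectx on the right
    st = repo[None].status(repo['.'])

The return value is a scmutil.status tuple whose fields are sorted lists of file names.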
392 | 392 | |
|
393 | 393 | def makememctx(repo, parents, text, user, date, branch, files, store, |
|
394 | 394 | editor=None, extra=None): |
|
395 | 395 | def getfilectx(repo, memctx, path): |
|
396 | 396 | data, mode, copied = store.getfile(path) |
|
397 | 397 | if data is None: |
|
398 | 398 | return None |
|
399 | 399 | islink, isexec = mode |
|
400 | 400 | return memfilectx(repo, path, data, islink=islink, isexec=isexec, |
|
401 | 401 | copied=copied, memctx=memctx) |
|
402 | 402 | if extra is None: |
|
403 | 403 | extra = {} |
|
404 | 404 | if branch: |
|
405 | 405 | extra['branch'] = encoding.fromlocal(branch) |
|
406 | 406 | ctx = memctx(repo, parents, text, files, getfilectx, user, |
|
407 | 407 | date, extra, editor) |
|
408 | 408 | return ctx |
|
409 | 409 | |
|
410 | 410 | def _filterederror(repo, changeid): |
|
411 | 411 | """build an exception to be raised about a filtered changeid |
|
412 | 412 | |
|
413 | 413 | This is extracted in a function to help extensions (eg: evolve) to |
|
414 | 414 | experiment with various message variants.""" |
|
415 | 415 | if repo.filtername.startswith('visible'): |
|
416 | 416 | msg = _("hidden revision '%s'") % changeid |
|
417 | 417 | hint = _('use --hidden to access hidden revisions') |
|
418 | 418 | return error.FilteredRepoLookupError(msg, hint=hint) |
|
419 | 419 | msg = _("filtered revision '%s' (not in '%s' subset)") |
|
420 | 420 | msg %= (changeid, repo.filtername) |
|
421 | 421 | return error.FilteredRepoLookupError(msg) |
|
422 | 422 | |
|
423 | 423 | class changectx(basectx): |
|
424 | 424 | """A changecontext object makes access to data related to a particular |
|
425 | 425 | changeset convenient. It represents a read-only context already present in |
|
426 | 426 | the repo.""" |
|
427 | 427 | def __init__(self, repo, changeid=''): |
|
428 | 428 | """changeid is a revision number, node, or tag""" |
|
429 | 429 | |
|
430 | 430 | # since basectx.__new__ already took care of copying the object, we |
|
431 | 431 | # don't need to do anything in __init__, so we just exit here |
|
432 | 432 | if isinstance(changeid, basectx): |
|
433 | 433 | return |
|
434 | 434 | |
|
435 | 435 | if changeid == '': |
|
436 | 436 | changeid = '.' |
|
437 | 437 | self._repo = repo |
|
438 | 438 | |
|
439 | 439 | try: |
|
440 | 440 | if isinstance(changeid, int): |
|
441 | 441 | self._node = repo.changelog.node(changeid) |
|
442 | 442 | self._rev = changeid |
|
443 | 443 | return |
|
444 | 444 | if not pycompat.ispy3 and isinstance(changeid, long): |
|
445 | 445 | changeid = str(changeid) |
|
446 | 446 | if changeid == 'null': |
|
447 | 447 | self._node = nullid |
|
448 | 448 | self._rev = nullrev |
|
449 | 449 | return |
|
450 | 450 | if changeid == 'tip': |
|
451 | 451 | self._node = repo.changelog.tip() |
|
452 | 452 | self._rev = repo.changelog.rev(self._node) |
|
453 | 453 | return |
|
454 | 454 | if changeid == '.' or changeid == repo.dirstate.p1(): |
|
455 | 455 | # this is a hack to delay/avoid loading obsmarkers |
|
456 | 456 | # when we know that '.' won't be hidden |
|
457 | 457 | self._node = repo.dirstate.p1() |
|
458 | 458 | self._rev = repo.unfiltered().changelog.rev(self._node) |
|
459 | 459 | return |
|
460 | 460 | if len(changeid) == 20: |
|
461 | 461 | try: |
|
462 | 462 | self._node = changeid |
|
463 | 463 | self._rev = repo.changelog.rev(changeid) |
|
464 | 464 | return |
|
465 | 465 | except error.FilteredRepoLookupError: |
|
466 | 466 | raise |
|
467 | 467 | except LookupError: |
|
468 | 468 | pass |
|
469 | 469 | |
|
470 | 470 | try: |
|
471 | 471 | r = int(changeid) |
|
472 | 472 | if '%d' % r != changeid: |
|
473 | 473 | raise ValueError |
|
474 | 474 | l = len(repo.changelog) |
|
475 | 475 | if r < 0: |
|
476 | 476 | r += l |
|
477 | 477 | if r < 0 or r >= l: |
|
478 | 478 | raise ValueError |
|
479 | 479 | self._rev = r |
|
480 | 480 | self._node = repo.changelog.node(r) |
|
481 | 481 | return |
|
482 | 482 | except error.FilteredIndexError: |
|
483 | 483 | raise |
|
484 | 484 | except (ValueError, OverflowError, IndexError): |
|
485 | 485 | pass |
|
486 | 486 | |
|
487 | 487 | if len(changeid) == 40: |
|
488 | 488 | try: |
|
489 | 489 | self._node = bin(changeid) |
|
490 | 490 | self._rev = repo.changelog.rev(self._node) |
|
491 | 491 | return |
|
492 | 492 | except error.FilteredLookupError: |
|
493 | 493 | raise |
|
494 | 494 | except (TypeError, LookupError): |
|
495 | 495 | pass |
|
496 | 496 | |
|
497 | 497 | # lookup bookmarks through the name interface |
|
498 | 498 | try: |
|
499 | 499 | self._node = repo.names.singlenode(repo, changeid) |
|
500 | 500 | self._rev = repo.changelog.rev(self._node) |
|
501 | 501 | return |
|
502 | 502 | except KeyError: |
|
503 | 503 | pass |
|
504 | 504 | except error.FilteredRepoLookupError: |
|
505 | 505 | raise |
|
506 | 506 | except error.RepoLookupError: |
|
507 | 507 | pass |
|
508 | 508 | |
|
509 | 509 | self._node = repo.unfiltered().changelog._partialmatch(changeid) |
|
510 | 510 | if self._node is not None: |
|
511 | 511 | self._rev = repo.changelog.rev(self._node) |
|
512 | 512 | return |
|
513 | 513 | |
|
514 | 514 | # lookup failed |
|
515 | 515 | # check if it might have come from damaged dirstate |
|
516 | 516 | # |
|
517 | 517 | # XXX we could avoid the unfiltered if we had a recognizable |
|
518 | 518 | # exception for filtered changeset access |
|
519 | 519 | if changeid in repo.unfiltered().dirstate.parents(): |
|
520 | 520 | msg = _("working directory has unknown parent '%s'!") |
|
521 | 521 | raise error.Abort(msg % short(changeid)) |
|
522 | 522 | try: |
|
523 | 523 | if len(changeid) == 20 and nonascii(changeid): |
|
524 | 524 | changeid = hex(changeid) |
|
525 | 525 | except TypeError: |
|
526 | 526 | pass |
|
527 | 527 | except (error.FilteredIndexError, error.FilteredLookupError, |
|
528 | 528 | error.FilteredRepoLookupError): |
|
529 | 529 | raise _filterederror(repo, changeid) |
|
530 | 530 | except IndexError: |
|
531 | 531 | pass |
|
532 | 532 | raise error.RepoLookupError( |
|
533 | 533 | _("unknown revision '%s'") % changeid) |
|
534 | 534 | |
|
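The constructor above tries progressively more expensive interpretations of changeid. Hypothetical examples of the forms it resolves (assuming a repo object; the hash is made up):

    repo[0]               # integer revision number
    repo['null']          # the null revision
    repo['tip']           # changelog tip
    repo['.']             # working directory parent (the fast path above)
    repo['42']            # decimal string
    repo['a1b2c3d4e5f6']  # hex prefix, if unambiguous
    repo['mybookmark']    # names interface: bookmarks, tags, branches

Anything that falls all the way through raises RepoLookupError (or the filtered variant built by _filterederror).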
535 | 535 | def __hash__(self): |
|
536 | 536 | try: |
|
537 | 537 | return hash(self._rev) |
|
538 | 538 | except AttributeError: |
|
539 | 539 | return id(self) |
|
540 | 540 | |
|
541 | 541 | def __nonzero__(self): |
|
542 | 542 | return self._rev != nullrev |
|
543 | 543 | |
|
544 | 544 | __bool__ = __nonzero__ |
|
545 | 545 | |
|
546 | 546 | @propertycache |
|
547 | 547 | def _changeset(self): |
|
548 | 548 | return self._repo.changelog.changelogrevision(self.rev()) |
|
549 | 549 | |
|
550 | 550 | @propertycache |
|
551 | 551 | def _manifest(self): |
|
552 | 552 | return self._manifestctx.read() |
|
553 | 553 | |
|
554 | 554 | @propertycache |
|
555 | 555 | def _manifestctx(self): |
|
556 | 556 | return self._repo.manifestlog[self._changeset.manifest] |
|
557 | 557 | |
|
558 | 558 | @propertycache |
|
559 | 559 | def _manifestdelta(self): |
|
560 | 560 | return self._manifestctx.readdelta() |
|
561 | 561 | |
|
562 | 562 | @propertycache |
|
563 | 563 | def _parents(self): |
|
564 | 564 | repo = self._repo |
|
565 | 565 | p1, p2 = repo.changelog.parentrevs(self._rev) |
|
566 | 566 | if p2 == nullrev: |
|
567 | 567 | return [changectx(repo, p1)] |
|
568 | 568 | return [changectx(repo, p1), changectx(repo, p2)] |
|
569 | 569 | |
|
570 | 570 | def changeset(self): |
|
571 | 571 | c = self._changeset |
|
572 | 572 | return ( |
|
573 | 573 | c.manifest, |
|
574 | 574 | c.user, |
|
575 | 575 | c.date, |
|
576 | 576 | c.files, |
|
577 | 577 | c.description, |
|
578 | 578 | c.extra, |
|
579 | 579 | ) |
|
580 | 580 | def manifestnode(self): |
|
581 | 581 | return self._changeset.manifest |
|
582 | 582 | |
|
583 | 583 | def user(self): |
|
584 | 584 | return self._changeset.user |
|
585 | 585 | def date(self): |
|
586 | 586 | return self._changeset.date |
|
587 | 587 | def files(self): |
|
588 | 588 | return self._changeset.files |
|
589 | 589 | def description(self): |
|
590 | 590 | return self._changeset.description |
|
591 | 591 | def branch(self): |
|
592 | 592 | return encoding.tolocal(self._changeset.extra.get("branch")) |
|
593 | 593 | def closesbranch(self): |
|
594 | 594 | return 'close' in self._changeset.extra |
|
595 | 595 | def extra(self): |
|
596 | 596 | return self._changeset.extra |
|
597 | 597 | def tags(self): |
|
598 | 598 | return self._repo.nodetags(self._node) |
|
599 | 599 | def bookmarks(self): |
|
600 | 600 | return self._repo.nodebookmarks(self._node) |
|
601 | 601 | def phase(self): |
|
602 | 602 | return self._repo._phasecache.phase(self._repo, self._rev) |
|
603 | 603 | def hidden(self): |
|
604 | 604 | return self._rev in repoview.filterrevs(self._repo, 'visible') |
|
605 | 605 | |
|
606 | 606 | def children(self): |
|
607 | 607 | """return contexts for each child changeset""" |
|
608 | 608 | c = self._repo.changelog.children(self._node) |
|
609 | 609 | return [changectx(self._repo, x) for x in c] |
|
610 | 610 | |
|
611 | 611 | def ancestors(self): |
|
612 | 612 | for a in self._repo.changelog.ancestors([self._rev]): |
|
613 | 613 | yield changectx(self._repo, a) |
|
614 | 614 | |
|
615 | 615 | def descendants(self): |
|
616 | 616 | for d in self._repo.changelog.descendants([self._rev]): |
|
617 | 617 | yield changectx(self._repo, d) |
|
618 | 618 | |
|
619 | 619 | def filectx(self, path, fileid=None, filelog=None): |
|
620 | 620 | """get a file context from this changeset""" |
|
621 | 621 | if fileid is None: |
|
622 | 622 | fileid = self.filenode(path) |
|
623 | 623 | return filectx(self._repo, path, fileid=fileid, |
|
624 | 624 | changectx=self, filelog=filelog) |
|
625 | 625 | |
|
626 | 626 | def ancestor(self, c2, warn=False): |
|
627 | 627 | """return the "best" ancestor context of self and c2 |
|
628 | 628 | |
|
629 | 629 | If there are multiple candidates, it will show a message and check |
|
630 | 630 | merge.preferancestor configuration before falling back to the |
|
631 | 631 | revlog ancestor.""" |
|
632 | 632 | # deal with workingctxs |
|
633 | 633 | n2 = c2._node |
|
634 | 634 | if n2 is None: |
|
635 | 635 | n2 = c2._parents[0]._node |
|
636 | 636 | cahs = self._repo.changelog.commonancestorsheads(self._node, n2) |
|
637 | 637 | if not cahs: |
|
638 | 638 | anc = nullid |
|
639 | 639 | elif len(cahs) == 1: |
|
640 | 640 | anc = cahs[0] |
|
641 | 641 | else: |
|
642 | 642 | # experimental config: merge.preferancestor |
|
643 | 643 | for r in self._repo.ui.configlist('merge', 'preferancestor', ['*']): |
|
644 | 644 | try: |
|
645 | 645 | ctx = changectx(self._repo, r) |
|
646 | 646 | except error.RepoLookupError: |
|
647 | 647 | continue |
|
648 | 648 | anc = ctx.node() |
|
649 | 649 | if anc in cahs: |
|
650 | 650 | break |
|
651 | 651 | else: |
|
652 | 652 | anc = self._repo.changelog.ancestor(self._node, n2) |
|
653 | 653 | if warn: |
|
654 | 654 | self._repo.ui.status( |
|
655 | 655 | (_("note: using %s as ancestor of %s and %s\n") % |
|
656 | 656 | (short(anc), short(self._node), short(n2))) + |
|
657 | 657 | ''.join(_(" alternatively, use --config " |
|
658 | 658 | "merge.preferancestor=%s\n") % |
|
659 | 659 | short(n) for n in sorted(cahs) if n != anc)) |
|
660 | 660 | return changectx(self._repo, anc) |
|
661 | 661 | |
|
662 | 662 | def descendant(self, other): |
|
663 | 663 | """True if other is descendant of this changeset""" |
|
664 | 664 | return self._repo.changelog.descendant(self._rev, other._rev) |
|
665 | 665 | |
|
666 | 666 | def walk(self, match): |
|
667 | 667 | '''Generates matching file names.''' |
|
668 | 668 | |
|
669 | 669 | # Wrap match.bad method to have message with nodeid |
|
670 | 670 | def bad(fn, msg): |
|
671 | 671 | # The manifest doesn't know about subrepos, so don't complain about |
|
672 | 672 | # paths into valid subrepos. |
|
673 | 673 | if any(fn == s or fn.startswith(s + '/') |
|
674 | 674 | for s in self.substate): |
|
675 | 675 | return |
|
676 | 676 | match.bad(fn, _('no such file in rev %s') % self) |
|
677 | 677 | |
|
678 | 678 | m = matchmod.badmatch(match, bad) |
|
679 | 679 | return self._manifest.walk(m) |
|
680 | 680 | |
|
681 | 681 | def matches(self, match): |
|
682 | 682 | return self.walk(match) |
|
683 | 683 | |
|
684 | 684 | class basefilectx(object): |
|
685 | 685 | """A filecontext object represents the common logic for its children: |
|
686 | 686 | filectx: read-only access to a filerevision that is already present |
|
687 | 687 | in the repo, |
|
688 | 688 | workingfilectx: a filecontext that represents files from the working |
|
689 | 689 | directory, |
|
690 | 690 | memfilectx: a filecontext that represents files in-memory.""" |
|
691 | 691 | def __new__(cls, repo, path, *args, **kwargs): |
|
692 | 692 | return super(basefilectx, cls).__new__(cls) |
|
693 | 693 | |
|
694 | 694 | @propertycache |
|
695 | 695 | def _filelog(self): |
|
696 | 696 | return self._repo.file(self._path) |
|
697 | 697 | |
|
698 | 698 | @propertycache |
|
699 | 699 | def _changeid(self): |
|
700 | if '_changeid' in self.__dict__: | |
|
700 | if r'_changeid' in self.__dict__: | |
|
701 | 701 | return self._changeid |
|
702 | elif '_changectx' in self.__dict__: | |
|
702 | elif r'_changectx' in self.__dict__: | |
|
703 | 703 | return self._changectx.rev() |
|
704 | elif '_descendantrev' in self.__dict__: | |
|
704 | elif r'_descendantrev' in self.__dict__: | |
|
705 | 705 | # this file context was created from a revision with a known |
|
706 | 706 | # descendant, so we can (lazily) correct for linkrev aliases |
|
707 | 707 | return self._adjustlinkrev(self._descendantrev) |
|
708 | 708 | else: |
|
709 | 709 | return self._filelog.linkrev(self._filerev) |
|
710 | 710 | |
|
711 | 711 | @propertycache |
|
712 | 712 | def _filenode(self): |
|
713 | if '_fileid' in self.__dict__: | |
|
713 | if r'_fileid' in self.__dict__: | |
|
714 | 714 | return self._filelog.lookup(self._fileid) |
|
715 | 715 | else: |
|
716 | 716 | return self._changectx.filenode(self._path) |
|
717 | 717 | |
|
718 | 718 | @propertycache |
|
719 | 719 | def _filerev(self): |
|
720 | 720 | return self._filelog.rev(self._filenode) |
|
721 | 721 | |
|
722 | 722 | @propertycache |
|
723 | 723 | def _repopath(self): |
|
724 | 724 | return self._path |
|
725 | 725 | |
|
726 | 726 | def __nonzero__(self): |
|
727 | 727 | try: |
|
728 | 728 | self._filenode |
|
729 | 729 | return True |
|
730 | 730 | except error.LookupError: |
|
731 | 731 | # file is missing |
|
732 | 732 | return False |
|
733 | 733 | |
|
734 | 734 | __bool__ = __nonzero__ |
|
735 | 735 | |
|
736 | 736 | def __str__(self): |
|
737 | 737 | try: |
|
738 | 738 | return "%s@%s" % (self.path(), self._changectx) |
|
739 | 739 | except error.LookupError: |
|
740 | 740 | return "%s@???" % self.path() |
|
741 | 741 | |
|
742 | 742 | def __repr__(self): |
|
743 | 743 | return "<%s %s>" % (type(self).__name__, str(self)) |
|
744 | 744 | |
|
745 | 745 | def __hash__(self): |
|
746 | 746 | try: |
|
747 | 747 | return hash((self._path, self._filenode)) |
|
748 | 748 | except AttributeError: |
|
749 | 749 | return id(self) |
|
750 | 750 | |
|
751 | 751 | def __eq__(self, other): |
|
752 | 752 | try: |
|
753 | 753 | return (type(self) == type(other) and self._path == other._path |
|
754 | 754 | and self._filenode == other._filenode) |
|
755 | 755 | except AttributeError: |
|
756 | 756 | return False |
|
757 | 757 | |
|
758 | 758 | def __ne__(self, other): |
|
759 | 759 | return not (self == other) |
|
760 | 760 | |
|
761 | 761 | def filerev(self): |
|
762 | 762 | return self._filerev |
|
763 | 763 | def filenode(self): |
|
764 | 764 | return self._filenode |
|
765 | 765 | def flags(self): |
|
766 | 766 | return self._changectx.flags(self._path) |
|
767 | 767 | def filelog(self): |
|
768 | 768 | return self._filelog |
|
769 | 769 | def rev(self): |
|
770 | 770 | return self._changeid |
|
771 | 771 | def linkrev(self): |
|
772 | 772 | return self._filelog.linkrev(self._filerev) |
|
773 | 773 | def node(self): |
|
774 | 774 | return self._changectx.node() |
|
775 | 775 | def hex(self): |
|
776 | 776 | return self._changectx.hex() |
|
777 | 777 | def user(self): |
|
778 | 778 | return self._changectx.user() |
|
779 | 779 | def date(self): |
|
780 | 780 | return self._changectx.date() |
|
781 | 781 | def files(self): |
|
782 | 782 | return self._changectx.files() |
|
783 | 783 | def description(self): |
|
784 | 784 | return self._changectx.description() |
|
785 | 785 | def branch(self): |
|
786 | 786 | return self._changectx.branch() |
|
787 | 787 | def extra(self): |
|
788 | 788 | return self._changectx.extra() |
|
789 | 789 | def phase(self): |
|
790 | 790 | return self._changectx.phase() |
|
791 | 791 | def phasestr(self): |
|
792 | 792 | return self._changectx.phasestr() |
|
793 | 793 | def manifest(self): |
|
794 | 794 | return self._changectx.manifest() |
|
795 | 795 | def changectx(self): |
|
796 | 796 | return self._changectx |
|
797 | 797 | def repo(self): |
|
798 | 798 | return self._repo |
|
799 | 799 | |
|
800 | 800 | def path(self): |
|
801 | 801 | return self._path |
|
802 | 802 | |
|
803 | 803 | def isbinary(self): |
|
804 | 804 | try: |
|
805 | 805 | return util.binary(self.data()) |
|
806 | 806 | except IOError: |
|
807 | 807 | return False |
|
808 | 808 | def isexec(self): |
|
809 | 809 | return 'x' in self.flags() |
|
810 | 810 | def islink(self): |
|
811 | 811 | return 'l' in self.flags() |
|
812 | 812 | |
|
813 | 813 | def isabsent(self): |
|
814 | 814 | """whether this filectx represents a file not in self._changectx |
|
815 | 815 | |
|
816 | 816 | This is mainly for merge code to detect change/delete conflicts. This is |
|
817 | 817 | expected to be True for all subclasses of basectx.""" |
|
818 | 818 | return False |
|
819 | 819 | |
|
820 | 820 | _customcmp = False |
|
821 | 821 | def cmp(self, fctx): |
|
822 | 822 | """compare with other file context |
|
823 | 823 | |
|
824 | 824 | returns True if different than fctx. |
|
825 | 825 | """ |
|
826 | 826 | if fctx._customcmp: |
|
827 | 827 | return fctx.cmp(self) |
|
828 | 828 | |
|
829 | 829 | if (fctx._filenode is None |
|
830 | 830 | and (self._repo._encodefilterpats |
|
831 | 831 | # if file data starts with '\1\n', empty metadata block is |
|
832 | 832 | # prepended, which adds 4 bytes to filelog.size(). |
|
833 | 833 | or self.size() - 4 == fctx.size()) |
|
834 | 834 | or self.size() == fctx.size()): |
|
835 | 835 | return self._filelog.cmp(self._filenode, fctx.data()) |
|
836 | 836 | |
|
837 | 837 | return True |
|
838 | 838 | |
|
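cmp() returns True when the contents differ; the guard above skips reading file data entirely when the sizes cannot match. A hypothetical usage sketch (assuming a repo object) in the canonical direction, comparing a file's first-parent revision against its working-directory copy:

    wctx = repo[None]
    f = 'mercurial/context.py'
    if wctx.p1()[f].cmp(wctx[f]):
        print('%s differs in the working directory' % f)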
839 | 839 | def _adjustlinkrev(self, srcrev, inclusive=False): |
|
840 | 840 | """return the first ancestor of <srcrev> introducing <fnode> |
|
841 | 841 | |
|
842 | 842 | If the linkrev of the file revision does not point to an ancestor of |
|
843 | 843 | srcrev, we'll walk down the ancestors until we find one introducing |
|
844 | 844 | this file revision. |
|
845 | 845 | |
|
846 | 846 | :srcrev: the changeset revision we search ancestors from |
|
847 | 847 | :inclusive: if true, the src revision will also be checked |
|
848 | 848 | """ |
|
849 | 849 | repo = self._repo |
|
850 | 850 | cl = repo.unfiltered().changelog |
|
851 | 851 | mfl = repo.manifestlog |
|
852 | 852 | # fetch the linkrev |
|
853 | 853 | lkr = self.linkrev() |
|
854 | 854 | # hack to reuse ancestor computation when searching for renames |
|
855 | 855 | memberanc = getattr(self, '_ancestrycontext', None) |
|
856 | 856 | iteranc = None |
|
857 | 857 | if srcrev is None: |
|
858 | 858 | # wctx case, used by workingfilectx during mergecopy |
|
859 | 859 | revs = [p.rev() for p in self._repo[None].parents()] |
|
860 | 860 | inclusive = True # we skipped the real (revless) source |
|
861 | 861 | else: |
|
862 | 862 | revs = [srcrev] |
|
863 | 863 | if memberanc is None: |
|
864 | 864 | memberanc = iteranc = cl.ancestors(revs, lkr, |
|
865 | 865 | inclusive=inclusive) |
|
866 | 866 | # check if this linkrev is an ancestor of srcrev |
|
867 | 867 | if lkr not in memberanc: |
|
868 | 868 | if iteranc is None: |
|
869 | 869 | iteranc = cl.ancestors(revs, lkr, inclusive=inclusive) |
|
870 | 870 | fnode = self._filenode |
|
871 | 871 | path = self._path |
|
872 | 872 | for a in iteranc: |
|
873 | 873 | ac = cl.read(a) # get changeset data (we avoid object creation) |
|
874 | 874 | if path in ac[3]: # checking the 'files' field. |
|
875 | 875 | # The file has been touched, check if the content is |
|
876 | 876 | # similar to the one we search for. |
|
877 | 877 | if fnode == mfl[ac[0]].readfast().get(path): |
|
878 | 878 | return a |
|
879 | 879 | # In theory, we should never get out of that loop without a result. |
|
880 | 880 | # But if the manifest uses a buggy file revision (not a child of |
|
881 | 881 | # the one it replaces) we could. Such a buggy situation will likely |
|
882 | 882 | # result in a crash somewhere else at some point. |
|
883 | 883 | return lkr |
|
884 | 884 | |
|
885 | 885 | def introrev(self): |
|
886 | 886 | """return the rev of the changeset which introduced this file revision |
|
887 | 887 | |
|
888 | 888 | This method is different from linkrev because it takes into account the |
|
889 | 889 | changeset the filectx was created from. It ensures the returned |
|
890 | 890 | revision is one of its ancestors. This prevents bugs from |
|
891 | 891 | 'linkrev-shadowing' when a file revision is used by multiple |
|
892 | 892 | changesets. |
|
893 | 893 | """ |
|
894 | 894 | lkr = self.linkrev() |
|
895 | 895 | attrs = vars(self) |
|
896 | 896 | noctx = not ('_changeid' in attrs or '_changectx' in attrs) |
|
897 | 897 | if noctx or self.rev() == lkr: |
|
898 | 898 | return self.linkrev() |
|
899 | 899 | return self._adjustlinkrev(self.rev(), inclusive=True) |
|
900 | 900 | |
|
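A short illustration of the distinction introrev() draws (hypothetical sketch, assuming a repo object and a tracked path): when several changesets reuse the same file revision, linkrev() points at whichever changeset the filelog recorded, which need not be an ancestor of the context this filectx came from; introrev() always is:

    fctx = repo['tip']['mercurial/context.py']
    cl = repo.changelog
    # introrev() is an ancestor of (or equal to) the context's revision
    assert fctx.introrev() in cl.ancestors([fctx.rev()], inclusive=True)
    # fctx.linkrev() carries no such guarantee under linkrev-shadowing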
901 | 901 | def _parentfilectx(self, path, fileid, filelog): |
|
902 | 902 | """create parent filectx keeping ancestry info for _adjustlinkrev()""" |
|
903 | 903 | fctx = filectx(self._repo, path, fileid=fileid, filelog=filelog) |
|
904 | 904 | if '_changeid' in vars(self) or '_changectx' in vars(self): |
|
905 | 905 | # If self is associated with a changeset (probably explicitly |
|
906 | 906 | # fed), ensure the created filectx is associated with a |
|
907 | 907 | # changeset that is an ancestor of self.changectx. |
|
908 | 908 | # This lets us later use _adjustlinkrev to get a correct link. |
|
909 | 909 | fctx._descendantrev = self.rev() |
|
910 | 910 | fctx._ancestrycontext = getattr(self, '_ancestrycontext', None) |
|
911 | 911 | elif '_descendantrev' in vars(self): |
|
912 | 912 | # Otherwise propagate _descendantrev if we have one associated. |
|
913 | 913 | fctx._descendantrev = self._descendantrev |
|
914 | 914 | fctx._ancestrycontext = getattr(self, '_ancestrycontext', None) |
|
915 | 915 | return fctx |
|
916 | 916 | |
|
917 | 917 | def parents(self): |
|
918 | 918 | _path = self._path |
|
919 | 919 | fl = self._filelog |
|
920 | 920 | parents = self._filelog.parents(self._filenode) |
|
921 | 921 | pl = [(_path, node, fl) for node in parents if node != nullid] |
|
922 | 922 | |
|
923 | 923 | r = fl.renamed(self._filenode) |
|
924 | 924 | if r: |
|
925 | 925 | # - In the simple rename case, both parents are nullid, pl is empty. |
|
926 | 926 | # - In case of merge, only one of the parents is nullid and should |
|
927 | 927 | # be replaced with the rename information. This parent is -always- |
|
928 | 928 | # the first one. |
|
929 | 929 | # |
|
930 | 930 | # As nullid parents have always been filtered out in the previous list |
|
931 | 931 | # comprehension, inserting at 0 will always result in "replacing the |
|
932 | 932 | # first nullid parent" with the rename information. |
|
933 | 933 | pl.insert(0, (r[0], r[1], self._repo.file(r[0]))) |
|
934 | 934 | |
|
935 | 935 | return [self._parentfilectx(path, fnode, l) for path, fnode, l in pl] |
|
936 | 936 | |
|
937 | 937 | def p1(self): |
|
938 | 938 | return self.parents()[0] |
|
939 | 939 | |
|
940 | 940 | def p2(self): |
|
941 | 941 | p = self.parents() |
|
942 | 942 | if len(p) == 2: |
|
943 | 943 | return p[1] |
|
944 | 944 | return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog) |
|
945 | 945 | |
|
946 | 946 | def annotate(self, follow=False, linenumber=False, diffopts=None): |
|
947 | 947 | '''returns a list of tuples of ((ctx, number), line) for each line |
|
948 | 948 | in the file, where ctx is the filectx of the node where |
|
949 | 949 | that line was last changed; if linenumber parameter is true, number is |
|
950 | 950 | the line number at the first appearance in the managed file, otherwise, |
|
951 | 951 | number has a fixed value of False. |
|
952 | 952 | ''' |
|
953 | 953 | |
|
954 | 954 | def lines(text): |
|
955 | 955 | if text.endswith("\n"): |
|
956 | 956 | return text.count("\n") |
|
957 | 957 | return text.count("\n") + int(bool(text)) |
|
958 | 958 | |
|
959 | 959 | if linenumber: |
|
960 | 960 | def decorate(text, rev): |
|
961 | 961 | return ([(rev, i) for i in xrange(1, lines(text) + 1)], text) |
|
962 | 962 | else: |
|
963 | 963 | def decorate(text, rev): |
|
964 | 964 | return ([(rev, False)] * lines(text), text) |
|
965 | 965 | |
|
966 | 966 | def pair(parent, child): |
|
967 | 967 | blocks = mdiff.allblocks(parent[1], child[1], opts=diffopts) |
|
968 | 968 | for (a1, a2, b1, b2), t in blocks: |
|
969 | 969 | # Changed blocks ('!') or blocks made only of blank lines ('~') |
|
970 | 970 | # belong to the child. |
|
971 | 971 | if t == '=': |
|
972 | 972 | child[0][b1:b2] = parent[0][a1:a2] |
|
973 | 973 | return child |
|
974 | 974 | |
|
975 | 975 | getlog = util.lrucachefunc(lambda x: self._repo.file(x)) |
|
976 | 976 | |
|
977 | 977 | def parents(f): |
|
978 | 978 | # Cut _descendantrev here to mitigate the penalty of lazy linkrev |
|
979 | 979 | # adjustment. Otherwise, p._adjustlinkrev() would walk changelog |
|
980 | 980 | # from the topmost introrev (= srcrev) down to p.linkrev() if it |
|
981 | 981 | # isn't an ancestor of the srcrev. |
|
982 | 982 | f._changeid |
|
983 | 983 | pl = f.parents() |
|
984 | 984 | |
|
985 | 985 | # Don't return renamed parents if we aren't following. |
|
986 | 986 | if not follow: |
|
987 | 987 | pl = [p for p in pl if p.path() == f.path()] |
|
988 | 988 | |
|
989 | 989 | # renamed filectx won't have a filelog yet, so set it |
|
990 | 990 | # from the cache to save time |
|
991 | 991 | for p in pl: |
|
992 | 992 | if not '_filelog' in p.__dict__: |
|
993 | 993 | p._filelog = getlog(p.path()) |
|
994 | 994 | |
|
995 | 995 | return pl |
|
996 | 996 | |
|
997 | 997 | # use linkrev to find the first changeset where self appeared |
|
998 | 998 | base = self |
|
999 | 999 | introrev = self.introrev() |
|
1000 | 1000 | if self.rev() != introrev: |
|
1001 | 1001 | base = self.filectx(self.filenode(), changeid=introrev) |
|
1002 | 1002 | if getattr(base, '_ancestrycontext', None) is None: |
|
1003 | 1003 | cl = self._repo.changelog |
|
1004 | 1004 | if introrev is None: |
|
1005 | 1005 | # wctx is not inclusive, but works because _ancestrycontext |
|
1006 | 1006 | # is used to test filelog revisions |
|
1007 | 1007 | ac = cl.ancestors([p.rev() for p in base.parents()], |
|
1008 | 1008 | inclusive=True) |
|
1009 | 1009 | else: |
|
1010 | 1010 | ac = cl.ancestors([introrev], inclusive=True) |
|
1011 | 1011 | base._ancestrycontext = ac |
|
1012 | 1012 | |
|
1013 | 1013 | # This algorithm would prefer to be recursive, but Python is a |
|
1014 | 1014 | # bit recursion-hostile. Instead we do an iterative |
|
1015 | 1015 | # depth-first search. |
|
1016 | 1016 | |
|
1017 | 1017 | # 1st DFS pre-calculates pcache and needed |
|
1018 | 1018 | visit = [base] |
|
1019 | 1019 | pcache = {} |
|
1020 | 1020 | needed = {base: 1} |
|
1021 | 1021 | while visit: |
|
1022 | 1022 | f = visit.pop() |
|
1023 | 1023 | if f in pcache: |
|
1024 | 1024 | continue |
|
1025 | 1025 | pl = parents(f) |
|
1026 | 1026 | pcache[f] = pl |
|
1027 | 1027 | for p in pl: |
|
1028 | 1028 | needed[p] = needed.get(p, 0) + 1 |
|
1029 | 1029 | if p not in pcache: |
|
1030 | 1030 | visit.append(p) |
|
1031 | 1031 | |
|
1032 | 1032 | # 2nd DFS does the actual annotate |
|
1033 | 1033 | visit[:] = [base] |
|
1034 | 1034 | hist = {} |
|
1035 | 1035 | while visit: |
|
1036 | 1036 | f = visit[-1] |
|
1037 | 1037 | if f in hist: |
|
1038 | 1038 | visit.pop() |
|
1039 | 1039 | continue |
|
1040 | 1040 | |
|
1041 | 1041 | ready = True |
|
1042 | 1042 | pl = pcache[f] |
|
1043 | 1043 | for p in pl: |
|
1044 | 1044 | if p not in hist: |
|
1045 | 1045 | ready = False |
|
1046 | 1046 | visit.append(p) |
|
1047 | 1047 | if ready: |
|
1048 | 1048 | visit.pop() |
|
1049 | 1049 | curr = decorate(f.data(), f) |
|
1050 | 1050 | for p in pl: |
|
1051 | 1051 | curr = pair(hist[p], curr) |
|
1052 | 1052 | if needed[p] == 1: |
|
1053 | 1053 | del hist[p] |
|
1054 | 1054 | del needed[p] |
|
1055 | 1055 | else: |
|
1056 | 1056 | needed[p] -= 1 |
|
1057 | 1057 | |
|
1058 | 1058 | hist[f] = curr |
|
1059 | 1059 | del pcache[f] |
|
1060 | 1060 | |
|
1061 | 1061 | return zip(hist[base][0], hist[base][1].splitlines(True)) |
|
1062 | 1062 | |
|
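A hypothetical usage sketch of annotate() above (assuming a repo object): each entry pairs the filectx that last touched a line with the line itself, plus the original line number when linenumber is true:

    fctx = repo['tip']['mercurial/context.py']
    for (afctx, lineno), line in fctx.annotate(linenumber=True):
        print('%s:%s: %s' % (afctx.rev(), lineno, line.rstrip('\n')))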
1063 | 1063 | def ancestors(self, followfirst=False): |
|
1064 | 1064 | visit = {} |
|
1065 | 1065 | c = self |
|
1066 | 1066 | if followfirst: |
|
1067 | 1067 | cut = 1 |
|
1068 | 1068 | else: |
|
1069 | 1069 | cut = None |
|
1070 | 1070 | |
|
1071 | 1071 | while True: |
|
1072 | 1072 | for parent in c.parents()[:cut]: |
|
1073 | 1073 | visit[(parent.linkrev(), parent.filenode())] = parent |
|
1074 | 1074 | if not visit: |
|
1075 | 1075 | break |
|
1076 | 1076 | c = visit.pop(max(visit)) |
|
1077 | 1077 | yield c |
|
1078 | 1078 | |
|
1079 | 1079 | class filectx(basefilectx): |
|
1080 | 1080 | """A filecontext object makes access to data related to a particular |
|
1081 | 1081 | filerevision convenient.""" |
|
1082 | 1082 | def __init__(self, repo, path, changeid=None, fileid=None, |
|
1083 | 1083 | filelog=None, changectx=None): |
|
1084 | 1084 | """changeid can be a changeset revision, node, or tag. |
|
1085 | 1085 | fileid can be a file revision or node.""" |
|
1086 | 1086 | self._repo = repo |
|
1087 | 1087 | self._path = path |
|
1088 | 1088 | |
|
1089 | 1089 | assert (changeid is not None |
|
1090 | 1090 | or fileid is not None |
|
1091 | 1091 | or changectx is not None), \ |
|
1092 | 1092 | ("bad args: changeid=%r, fileid=%r, changectx=%r" |
|
1093 | 1093 | % (changeid, fileid, changectx)) |
|
1094 | 1094 | |
|
1095 | 1095 | if filelog is not None: |
|
1096 | 1096 | self._filelog = filelog |
|
1097 | 1097 | |
|
1098 | 1098 | if changeid is not None: |
|
1099 | 1099 | self._changeid = changeid |
|
1100 | 1100 | if changectx is not None: |
|
1101 | 1101 | self._changectx = changectx |
|
1102 | 1102 | if fileid is not None: |
|
1103 | 1103 | self._fileid = fileid |
|
1104 | 1104 | |
|
1105 | 1105 | @propertycache |
|
1106 | 1106 | def _changectx(self): |
|
1107 | 1107 | try: |
|
1108 | 1108 | return changectx(self._repo, self._changeid) |
|
1109 | 1109 | except error.FilteredRepoLookupError: |
|
1110 | 1110 | # Linkrev may point to any revision in the repository. When the |
|
1111 | 1111 | # repository is filtered this may lead to `filectx` trying to build |
|
1112 | 1112 | # `changectx` for a filtered revision. In such a case we fall back to |
|
1113 | 1113 | # creating `changectx` on the unfiltered version of the repository. |
|
1114 | 1114 | # This fallback should not be an issue because `changectx` from |
|
1115 | 1115 | # `filectx` are not used in complex operations that care about |
|
1116 | 1116 | # filtering. |
|
1117 | 1117 | # |
|
1118 | 1118 | # This fallback is a cheap and dirty fix that prevents several |
|
1119 | 1119 | # crashes. It does not ensure the behavior is correct. However the |
|
1120 | 1120 | # behavior was not correct before filtering either and "incorrect |
|
1121 | 1121 | # behavior" is seen as better than "crash" |
|
1122 | 1122 | # |
|
1123 | 1123 | # Linkrevs have several serious troubles with filtering that are |
|
1124 | 1124 | # complicated to solve. Proper handling of the issue here should be |
|
1125 | 1125 | # considered when solving the linkrev issues is on the table. |
|
1126 | 1126 | return changectx(self._repo.unfiltered(), self._changeid) |
|
1127 | 1127 | |
|
1128 | 1128 | def filectx(self, fileid, changeid=None): |
|
1129 | 1129 | '''opens an arbitrary revision of the file without |
|
1130 | 1130 | opening a new filelog''' |
|
1131 | 1131 | return filectx(self._repo, self._path, fileid=fileid, |
|
1132 | 1132 | filelog=self._filelog, changeid=changeid) |
|
1133 | 1133 | |
|
1134 | 1134 | def rawdata(self): |
|
1135 | 1135 | return self._filelog.revision(self._filenode, raw=True) |
|
1136 | 1136 | |
|
1137 | 1137 | def data(self): |
|
1138 | 1138 | try: |
|
1139 | 1139 | return self._filelog.read(self._filenode) |
|
1140 | 1140 | except error.CensoredNodeError: |
|
1141 | 1141 | if self._repo.ui.config("censor", "policy", "abort") == "ignore": |
|
1142 | 1142 | return "" |
|
1143 | 1143 | raise error.Abort(_("censored node: %s") % short(self._filenode), |
|
1144 | 1144 | hint=_("set censor.policy to ignore errors")) |
|
1145 | 1145 | |
|
1146 | 1146 | def size(self): |
|
1147 | 1147 | return self._filelog.size(self._filerev) |
|
1148 | 1148 | |
|
1149 | 1149 | def renamed(self): |
|
1150 | 1150 | """check if file was actually renamed in this changeset revision |
|
1151 | 1151 | |
|
1152 | 1152 | If a rename is logged in the file revision, we report the copy for the |
|
1153 | 1153 | changeset only if the file revision's linkrev points back to the changeset |
|
1154 | 1154 | in question or both changeset parents contain different file revisions. |
|
1155 | 1155 | """ |
|
1156 | 1156 | |
|
1157 | 1157 | renamed = self._filelog.renamed(self._filenode) |
|
1158 | 1158 | if not renamed: |
|
1159 | 1159 | return renamed |
|
1160 | 1160 | |
|
1161 | 1161 | if self.rev() == self.linkrev(): |
|
1162 | 1162 | return renamed |
|
1163 | 1163 | |
|
1164 | 1164 | name = self.path() |
|
1165 | 1165 | fnode = self._filenode |
|
1166 | 1166 | for p in self._changectx.parents(): |
|
1167 | 1167 | try: |
|
1168 | 1168 | if fnode == p.filenode(name): |
|
1169 | 1169 | return None |
|
1170 | 1170 | except error.LookupError: |
|
1171 | 1171 | pass |
|
1172 | 1172 | return renamed |
|
1173 | 1173 | |
|
1174 | 1174 | def children(self): |
|
1175 | 1175 | # hard for renames |
|
1176 | 1176 | c = self._filelog.children(self._filenode) |
|
1177 | 1177 | return [filectx(self._repo, self._path, fileid=x, |
|
1178 | 1178 | filelog=self._filelog) for x in c] |
|
1179 | 1179 | |
|
1180 | 1180 | def _changesrange(fctx1, fctx2, linerange2, diffopts): |
|
1181 | 1181 | """Return `(diffinrange, linerange1)` where `diffinrange` is True |
|
1182 | 1182 | if diff from fctx2 to fctx1 has changes in linerange2 and |
|
1183 | 1183 | `linerange1` is the new line range for fctx1. |
|
1184 | 1184 | """ |
|
1185 | 1185 | blocks = mdiff.allblocks(fctx1.data(), fctx2.data(), diffopts) |
|
1186 | 1186 | filteredblocks, linerange1 = mdiff.blocksinrange(blocks, linerange2) |
|
1187 | 1187 | diffinrange = any(stype == '!' for _, stype in filteredblocks) |
|
1188 | 1188 | return diffinrange, linerange1 |
|
1189 | 1189 | |
|
1190 | 1190 | def blockancestors(fctx, fromline, toline, followfirst=False): |
|
1191 | 1191 | """Yield ancestors of `fctx` with respect to the block of lines within |
|
1192 | 1192 | `fromline`-`toline` range. |
|
1193 | 1193 | """ |
|
1194 | 1194 | diffopts = patch.diffopts(fctx._repo.ui) |
|
1195 | 1195 | introrev = fctx.introrev() |
|
1196 | 1196 | if fctx.rev() != introrev: |
|
1197 | 1197 | fctx = fctx.filectx(fctx.filenode(), changeid=introrev) |
|
1198 | 1198 | visit = {(fctx.linkrev(), fctx.filenode()): (fctx, (fromline, toline))} |
|
1199 | 1199 | while visit: |
|
1200 | 1200 | c, linerange2 = visit.pop(max(visit)) |
|
1201 | 1201 | pl = c.parents() |
|
1202 | 1202 | if followfirst: |
|
1203 | 1203 | pl = pl[:1] |
|
1204 | 1204 | if not pl: |
|
1205 | 1205 | # The block originates from the initial revision. |
|
1206 | 1206 | yield c, linerange2 |
|
1207 | 1207 | continue |
|
1208 | 1208 | inrange = False |
|
1209 | 1209 | for p in pl: |
|
1210 | 1210 | inrangep, linerange1 = _changesrange(p, c, linerange2, diffopts) |
|
1211 | 1211 | inrange = inrange or inrangep |
|
1212 | 1212 | if linerange1[0] == linerange1[1]: |
|
1213 | 1213 | # Parent's linerange is empty, meaning that the block got |
|
1214 | 1214 | # introduced in this revision; no need to go further in this |
|
1215 | 1215 | # branch. |
|
1216 | 1216 | continue |
|
1217 | 1217 | # Set _descendantrev with 'c' (a known descendant) so that, when |
|
1218 | 1218 | # _adjustlinkrev is called for 'p', it receives this descendant |
|
1219 | 1219 | # (as srcrev) instead of the possibly topmost introrev. |
|
1220 | 1220 | p._descendantrev = c.rev() |
|
1221 | 1221 | visit[p.linkrev(), p.filenode()] = p, linerange1 |
|
1222 | 1222 | if inrange: |
|
1223 | 1223 | yield c, linerange2 |
|
1224 | 1224 | |
|
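A hypothetical usage sketch of blockancestors() above (assuming a repo object): follow lines 10-20 of a file back through the revisions that touched them:

    fctx = repo['tip']['mercurial/context.py']
    for c, (fromline, toline) in blockancestors(fctx, 10, 20):
        print('%s: lines %d-%d' % (c.rev(), fromline, toline))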
1225 | 1225 | def blockdescendants(fctx, fromline, toline): |
|
1226 | 1226 | """Yield descendants of `fctx` with respect to the block of lines within |
|
1227 | 1227 | `fromline`-`toline` range. |
|
1228 | 1228 | """ |
|
1229 | 1229 | # First possibly yield 'fctx' if it has changes in range with respect to |
|
1230 | 1230 | # its parents. |
|
1231 | 1231 | try: |
|
1232 | 1232 | c, linerange1 = next(blockancestors(fctx, fromline, toline)) |
|
1233 | 1233 | except StopIteration: |
|
1234 | 1234 | pass |
|
1235 | 1235 | else: |
|
1236 | 1236 | if c == fctx: |
|
1237 | 1237 | yield c, linerange1 |
|
1238 | 1238 | |
|
1239 | 1239 | diffopts = patch.diffopts(fctx._repo.ui) |
|
1240 | 1240 | fl = fctx.filelog() |
|
1241 | 1241 | seen = {fctx.filerev(): (fctx, (fromline, toline))} |
|
1242 | 1242 | for i in fl.descendants([fctx.filerev()]): |
|
1243 | 1243 | c = fctx.filectx(i) |
|
1244 | 1244 | inrange = False |
|
1245 | 1245 | for x in fl.parentrevs(i): |
|
1246 | 1246 | try: |
|
1247 | 1247 | p, linerange2 = seen[x] |
|
1248 | 1248 | except KeyError: |
|
1249 | 1249 | # nullrev or other branch |
|
1250 | 1250 | continue |
|
1251 | 1251 | inrangep, linerange1 = _changesrange(c, p, linerange2, diffopts) |
|
1252 | 1252 | inrange = inrange or inrangep |
|
1253 | 1253 | # If revision 'i' has been seen (it's a merge), we assume that its |
|
1254 | 1254 | # line range is the same independently of which parent was used |
|
1255 | 1255 | # to compute it. |
|
1256 | 1256 | assert i not in seen or seen[i][1] == linerange1, ( |
|
1257 | 1257 | 'computed line range for %s is not consistent between ' |
|
1258 | 1258 | 'ancestor branches' % c) |
|
1259 | 1259 | seen[i] = c, linerange1 |
|
1260 | 1260 | if inrange: |
|
1261 | 1261 | yield c, linerange1 |
|
1262 | 1262 | |
|
1263 | 1263 | class committablectx(basectx): |
|
1264 | 1264 | """A committablectx object provides common functionality for a context that |
|
1265 | 1265 | wants the ability to commit, e.g. workingctx or memctx.""" |
|
1266 | 1266 | def __init__(self, repo, text="", user=None, date=None, extra=None, |
|
1267 | 1267 | changes=None): |
|
1268 | 1268 | self._repo = repo |
|
1269 | 1269 | self._rev = None |
|
1270 | 1270 | self._node = None |
|
1271 | 1271 | self._text = text |
|
1272 | 1272 | if date: |
|
1273 | 1273 | self._date = util.parsedate(date) |
|
1274 | 1274 | if user: |
|
1275 | 1275 | self._user = user |
|
1276 | 1276 | if changes: |
|
1277 | 1277 | self._status = changes |
|
1278 | 1278 | |
|
1279 | 1279 | self._extra = {} |
|
1280 | 1280 | if extra: |
|
1281 | 1281 | self._extra = extra.copy() |
|
1282 | 1282 | if 'branch' not in self._extra: |
|
1283 | 1283 | try: |
|
1284 | 1284 | branch = encoding.fromlocal(self._repo.dirstate.branch()) |
|
1285 | 1285 | except UnicodeDecodeError: |
|
1286 | 1286 | raise error.Abort(_('branch name not in UTF-8!')) |
|
1287 | 1287 | self._extra['branch'] = branch |
|
1288 | 1288 | if self._extra['branch'] == '': |
|
1289 | 1289 | self._extra['branch'] = 'default' |
|
1290 | 1290 | |
|
1291 | 1291 | def __str__(self): |
|
1292 | 1292 | return str(self._parents[0]) + "+" |
|
1293 | 1293 | |
|
1294 | 1294 | def __nonzero__(self): |
|
1295 | 1295 | return True |
|
1296 | 1296 | |
|
1297 | 1297 | __bool__ = __nonzero__ |
|
1298 | 1298 | |
|
1299 | 1299 | def _buildflagfunc(self): |
|
1300 | 1300 | # Create a fallback function for getting file flags when the |
|
1301 | 1301 | # filesystem doesn't support them |
|
1302 | 1302 | |
|
1303 | 1303 | copiesget = self._repo.dirstate.copies().get |
|
1304 | 1304 | parents = self.parents() |
|
1305 | 1305 | if len(parents) < 2: |
|
1306 | 1306 | # when we have one parent, it's easy: copy from parent |
|
1307 | 1307 | man = parents[0].manifest() |
|
1308 | 1308 | def func(f): |
|
1309 | 1309 | f = copiesget(f, f) |
|
1310 | 1310 | return man.flags(f) |
|
1311 | 1311 | else: |
|
1312 | 1312 | # merges are tricky: we try to reconstruct the unstored |
|
1313 | 1313 | # result from the merge (issue1802) |
|
1314 | 1314 | p1, p2 = parents |
|
1315 | 1315 | pa = p1.ancestor(p2) |
|
1316 | 1316 | m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest() |
|
1317 | 1317 | |
|
1318 | 1318 | def func(f): |
|
1319 | 1319 | f = copiesget(f, f) # may be wrong for merges with copies |
|
1320 | 1320 | fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f) |
|
1321 | 1321 | if fl1 == fl2: |
|
1322 | 1322 | return fl1 |
|
1323 | 1323 | if fl1 == fla: |
|
1324 | 1324 | return fl2 |
|
1325 | 1325 | if fl2 == fla: |
|
1326 | 1326 | return fl1 |
|
1327 | 1327 | return '' # punt for conflicts |
|
1328 | 1328 | |
|
1329 | 1329 | return func |
|
1330 | 1330 | |
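
The three-way flag reconciliation above is worth a concrete case. Restated as a standalone sketch (for illustration only, not part of the module): if one side still matches the ancestor, the other side's change wins; a genuine conflict punts with '':

    def mergeflags(fl1, fl2, fla):
        # fl1, fl2: flags on the two merge parents; fla: flags on the ancestor
        if fl1 == fl2:
            return fl1   # both sides agree
        if fl1 == fla:
            return fl2   # only p2 changed the flag
        if fl2 == fla:
            return fl1   # only p1 changed the flag
        return ''        # conflicting flag changes: punt

    # p1 made the file executable while p2 left it alone: 'x' wins
    assert mergeflags('x', '', '') == 'x'
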
|
1331 | 1331 | @propertycache |
|
1332 | 1332 | def _flagfunc(self): |
|
1333 | 1333 | return self._repo.dirstate.flagfunc(self._buildflagfunc) |
|
1334 | 1334 | |
|
1335 | 1335 | @propertycache |
|
1336 | 1336 | def _status(self): |
|
1337 | 1337 | return self._repo.status() |
|
1338 | 1338 | |
|
1339 | 1339 | @propertycache |
|
1340 | 1340 | def _user(self): |
|
1341 | 1341 | return self._repo.ui.username() |
|
1342 | 1342 | |
|
1343 | 1343 | @propertycache |
|
1344 | 1344 | def _date(self): |
|
1345 | 1345 | return util.makedate() |
|
1346 | 1346 | |
|
1347 | 1347 | def subrev(self, subpath): |
|
1348 | 1348 | return None |
|
1349 | 1349 | |
|
1350 | 1350 | def manifestnode(self): |
|
1351 | 1351 | return None |
|
1352 | 1352 | def user(self): |
|
1353 | 1353 | return self._user or self._repo.ui.username() |
|
1354 | 1354 | def date(self): |
|
1355 | 1355 | return self._date |
|
1356 | 1356 | def description(self): |
|
1357 | 1357 | return self._text |
|
1358 | 1358 | def files(self): |
|
1359 | 1359 | return sorted(self._status.modified + self._status.added + |
|
1360 | 1360 | self._status.removed) |
|
1361 | 1361 | |
|
1362 | 1362 | def modified(self): |
|
1363 | 1363 | return self._status.modified |
|
1364 | 1364 | def added(self): |
|
1365 | 1365 | return self._status.added |
|
1366 | 1366 | def removed(self): |
|
1367 | 1367 | return self._status.removed |
|
1368 | 1368 | def deleted(self): |
|
1369 | 1369 | return self._status.deleted |
|
1370 | 1370 | def branch(self): |
|
1371 | 1371 | return encoding.tolocal(self._extra['branch']) |
|
1372 | 1372 | def closesbranch(self): |
|
1373 | 1373 | return 'close' in self._extra |
|
1374 | 1374 | def extra(self): |
|
1375 | 1375 | return self._extra |
|
1376 | 1376 | |
|
1377 | 1377 | def tags(self): |
|
1378 | 1378 | return [] |
|
1379 | 1379 | |
|
1380 | 1380 | def bookmarks(self): |
|
1381 | 1381 | b = [] |
|
1382 | 1382 | for p in self.parents(): |
|
1383 | 1383 | b.extend(p.bookmarks()) |
|
1384 | 1384 | return b |
|
1385 | 1385 | |
|
1386 | 1386 | def phase(self): |
|
1387 | 1387 | phase = phases.draft # default phase to draft |
|
1388 | 1388 | for p in self.parents(): |
|
1389 | 1389 | phase = max(phase, p.phase()) |
|
1390 | 1390 | return phase |
|
1391 | 1391 | |
|
1392 | 1392 | def hidden(self): |
|
1393 | 1393 | return False |
|
1394 | 1394 | |
|
1395 | 1395 | def children(self): |
|
1396 | 1396 | return [] |
|
1397 | 1397 | |
|
1398 | 1398 | def flags(self, path): |
|
1399 | if '_manifest' in self.__dict__: | |
|
1399 | if r'_manifest' in self.__dict__: | |
|
1400 | 1400 | try: |
|
1401 | 1401 | return self._manifest.flags(path) |
|
1402 | 1402 | except KeyError: |
|
1403 | 1403 | return '' |
|
1404 | 1404 | |
|
1405 | 1405 | try: |
|
1406 | 1406 | return self._flagfunc(path) |
|
1407 | 1407 | except OSError: |
|
1408 | 1408 | return '' |
|
1409 | 1409 | |
|
1410 | 1410 | def ancestor(self, c2): |
|
1411 | 1411 | """return the "best" ancestor context of self and c2""" |
|
1412 | 1412 | return self._parents[0].ancestor(c2) # punt on two parents for now |
|
1413 | 1413 | |
|
1414 | 1414 | def walk(self, match): |
|
1415 | 1415 | '''Generates matching file names.''' |
|
1416 | 1416 | return sorted(self._repo.dirstate.walk(match, sorted(self.substate), |
|
1417 | 1417 | True, False)) |
|
1418 | 1418 | |
|
1419 | 1419 | def matches(self, match): |
|
1420 | 1420 | return sorted(self._repo.dirstate.matches(match)) |
|
1421 | 1421 | |
|
1422 | 1422 | def ancestors(self): |
|
1423 | 1423 | for p in self._parents: |
|
1424 | 1424 | yield p |
|
1425 | 1425 | for a in self._repo.changelog.ancestors( |
|
1426 | 1426 | [p.rev() for p in self._parents]): |
|
1427 | 1427 | yield changectx(self._repo, a) |
|
1428 | 1428 | |
|
1429 | 1429 | def markcommitted(self, node): |
|
1430 | 1430 | """Perform post-commit cleanup necessary after committing this ctx |
|
1431 | 1431 | |
|
1432 | 1432 | Specifically, this updates backing stores this working context |
|
1433 | 1433 | wraps to reflect the fact that the changes reflected by this |
|
1434 | 1434 | workingctx have been committed. For example, it marks |
|
1435 | 1435 | modified and added files as normal in the dirstate. |
|
1436 | 1436 | |
|
1437 | 1437 | """ |
|
1438 | 1438 | |
|
1439 | 1439 | self._repo.dirstate.beginparentchange() |
|
1440 | 1440 | for f in self.modified() + self.added(): |
|
1441 | 1441 | self._repo.dirstate.normal(f) |
|
1442 | 1442 | for f in self.removed(): |
|
1443 | 1443 | self._repo.dirstate.drop(f) |
|
1444 | 1444 | self._repo.dirstate.setparents(node) |
|
1445 | 1445 | self._repo.dirstate.endparentchange() |
|
1446 | 1446 | |
|
1447 | 1447 | # write changes out explicitly, because nesting wlock at |
|
1448 | 1448 | # runtime may prevent 'wlock.release()' in 'repo.commit()' |
|
1449 | 1449 | # from immediately doing so for subsequent changing files |
|
1450 | 1450 | self._repo.dirstate.write(self._repo.currenttransaction()) |
|
1451 | 1451 | |
|
1452 | 1452 | class workingctx(committablectx): |
|
1453 | 1453 | """A workingctx object makes access to data related to |
|
1454 | 1454 | the current working directory convenient. |
|
1455 | 1455 | date - any valid date string or (unixtime, offset), or None. |
|
1456 | 1456 | user - username string, or None. |
|
1457 | 1457 | extra - a dictionary of extra values, or None. |
|
1458 | 1458 | changes - a list of file lists as returned by localrepo.status() |
|
1459 | 1459 | or None to use the repository status. |
|
1460 | 1460 | """ |
|
1461 | 1461 | def __init__(self, repo, text="", user=None, date=None, extra=None, |
|
1462 | 1462 | changes=None): |
|
1463 | 1463 | super(workingctx, self).__init__(repo, text, user, date, extra, changes) |
|
1464 | 1464 | |
|
1465 | 1465 | def __iter__(self): |
|
1466 | 1466 | d = self._repo.dirstate |
|
1467 | 1467 | for f in d: |
|
1468 | 1468 | if d[f] != 'r': |
|
1469 | 1469 | yield f |
|
1470 | 1470 | |
|
1471 | 1471 | def __contains__(self, key): |
|
1472 | 1472 | return self._repo.dirstate[key] not in "?r" |
|
1473 | 1473 | |
|
1474 | 1474 | def hex(self): |
|
1475 | 1475 | return hex(wdirid) |
|
1476 | 1476 | |
|
1477 | 1477 | @propertycache |
|
1478 | 1478 | def _parents(self): |
|
1479 | 1479 | p = self._repo.dirstate.parents() |
|
1480 | 1480 | if p[1] == nullid: |
|
1481 | 1481 | p = p[:-1] |
|
1482 | 1482 | return [changectx(self._repo, x) for x in p] |
|
1483 | 1483 | |
|
1484 | 1484 | def filectx(self, path, filelog=None): |
|
1485 | 1485 | """get a file context from the working directory""" |
|
1486 | 1486 | return workingfilectx(self._repo, path, workingctx=self, |
|
1487 | 1487 | filelog=filelog) |
|
1488 | 1488 | |
|
1489 | 1489 | def dirty(self, missing=False, merge=True, branch=True): |
|
1490 | 1490 | "check whether a working directory is modified" |
|
1491 | 1491 | # check subrepos first |
|
1492 | 1492 | for s in sorted(self.substate): |
|
1493 | 1493 | if self.sub(s).dirty(): |
|
1494 | 1494 | return True |
|
1495 | 1495 | # check current working dir |
|
1496 | 1496 | return ((merge and self.p2()) or |
|
1497 | 1497 | (branch and self.branch() != self.p1().branch()) or |
|
1498 | 1498 | self.modified() or self.added() or self.removed() or |
|
1499 | 1499 | (missing and self.deleted())) |
|
1500 | 1500 | |
|
1501 | 1501 | def add(self, list, prefix=""): |
|
1502 | 1502 | join = lambda f: os.path.join(prefix, f) |
|
1503 | 1503 | with self._repo.wlock(): |
|
1504 | 1504 | ui, ds = self._repo.ui, self._repo.dirstate |
|
1505 | 1505 | rejected = [] |
|
1506 | 1506 | lstat = self._repo.wvfs.lstat |
|
1507 | 1507 | for f in list: |
|
1508 | 1508 | scmutil.checkportable(ui, join(f)) |
|
1509 | 1509 | try: |
|
1510 | 1510 | st = lstat(f) |
|
1511 | 1511 | except OSError: |
|
1512 | 1512 | ui.warn(_("%s does not exist!\n") % join(f)) |
|
1513 | 1513 | rejected.append(f) |
|
1514 | 1514 | continue |
|
1515 | 1515 | if st.st_size > 10000000: |
|
1516 | 1516 | ui.warn(_("%s: up to %d MB of RAM may be required " |
|
1517 | 1517 | "to manage this file\n" |
|
1518 | 1518 | "(use 'hg revert %s' to cancel the " |
|
1519 | 1519 | "pending addition)\n") |
|
1520 | 1520 | % (f, 3 * st.st_size // 1000000, join(f))) |
|
1521 | 1521 | if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)): |
|
1522 | 1522 | ui.warn(_("%s not added: only files and symlinks " |
|
1523 | 1523 | "supported currently\n") % join(f)) |
|
1524 | 1524 | rejected.append(f) |
|
1525 | 1525 | elif ds[f] in 'amn': |
|
1526 | 1526 | ui.warn(_("%s already tracked!\n") % join(f)) |
|
1527 | 1527 | elif ds[f] == 'r': |
|
1528 | 1528 | ds.normallookup(f) |
|
1529 | 1529 | else: |
|
1530 | 1530 | ds.add(f) |
|
1531 | 1531 | return rejected |
|
1532 | 1532 | |
|
1533 | 1533 | def forget(self, files, prefix=""): |
|
1534 | 1534 | join = lambda f: os.path.join(prefix, f) |
|
1535 | 1535 | with self._repo.wlock(): |
|
1536 | 1536 | rejected = [] |
|
1537 | 1537 | for f in files: |
|
1538 | 1538 | if f not in self._repo.dirstate: |
|
1539 | 1539 | self._repo.ui.warn(_("%s not tracked!\n") % join(f)) |
|
1540 | 1540 | rejected.append(f) |
|
1541 | 1541 | elif self._repo.dirstate[f] != 'a': |
|
1542 | 1542 | self._repo.dirstate.remove(f) |
|
1543 | 1543 | else: |
|
1544 | 1544 | self._repo.dirstate.drop(f) |
|
1545 | 1545 | return rejected |
|
1546 | 1546 | |
|
1547 | 1547 | def undelete(self, list): |
|
1548 | 1548 | pctxs = self.parents() |
|
1549 | 1549 | with self._repo.wlock(): |
|
1550 | 1550 | for f in list: |
|
1551 | 1551 | if self._repo.dirstate[f] != 'r': |
|
1552 | 1552 | self._repo.ui.warn(_("%s not removed!\n") % f) |
|
1553 | 1553 | else: |
|
1554 | 1554 | fctx = f in pctxs[0] and pctxs[0][f] or pctxs[1][f] |
|
1555 | 1555 | t = fctx.data() |
|
1556 | 1556 | self._repo.wwrite(f, t, fctx.flags()) |
|
1557 | 1557 | self._repo.dirstate.normal(f) |
|
1558 | 1558 | |
|
1559 | 1559 | def copy(self, source, dest): |
|
1560 | 1560 | try: |
|
1561 | 1561 | st = self._repo.wvfs.lstat(dest) |
|
1562 | 1562 | except OSError as err: |
|
1563 | 1563 | if err.errno != errno.ENOENT: |
|
1564 | 1564 | raise |
|
1565 | 1565 | self._repo.ui.warn(_("%s does not exist!\n") % dest) |
|
1566 | 1566 | return |
|
1567 | 1567 | if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)): |
|
1568 | 1568 | self._repo.ui.warn(_("copy failed: %s is not a file or a " |
|
1569 | 1569 | "symbolic link\n") % dest) |
|
1570 | 1570 | else: |
|
1571 | 1571 | with self._repo.wlock(): |
|
1572 | 1572 | if self._repo.dirstate[dest] in '?': |
|
1573 | 1573 | self._repo.dirstate.add(dest) |
|
1574 | 1574 | elif self._repo.dirstate[dest] in 'r': |
|
1575 | 1575 | self._repo.dirstate.normallookup(dest) |
|
1576 | 1576 | self._repo.dirstate.copy(source, dest) |
|
1577 | 1577 | |
|
1578 | 1578 | def match(self, pats=None, include=None, exclude=None, default='glob', |
|
1579 | 1579 | listsubrepos=False, badfn=None): |
|
1580 | 1580 | if pats is None: |
|
1581 | 1581 | pats = [] |
|
1582 | 1582 | r = self._repo |
|
1583 | 1583 | |
|
1584 | 1584 | # Only a case-insensitive filesystem needs magic to translate user input
|
1585 | 1585 | # to actual case in the filesystem. |
|
1586 | 1586 | matcherfunc = matchmod.match |
|
1587 | 1587 | if not util.fscasesensitive(r.root): |
|
1588 | 1588 | matcherfunc = matchmod.icasefsmatcher |
|
1589 | 1589 | return matcherfunc(r.root, r.getcwd(), pats, |
|
1590 | 1590 | include, exclude, default, |
|
1591 | 1591 | auditor=r.auditor, ctx=self, |
|
1592 | 1592 | listsubrepos=listsubrepos, badfn=badfn) |
|
1593 | 1593 | |
|
1594 | 1594 | def _filtersuspectsymlink(self, files): |
|
1595 | 1595 | if not files or self._repo.dirstate._checklink: |
|
1596 | 1596 | return files |
|
1597 | 1597 | |
|
1598 | 1598 | # Symlink placeholders may get non-symlink-like contents |
|
1599 | 1599 | # via user error or dereferencing by NFS or Samba servers, |
|
1600 | 1600 | # so we filter out any placeholders that don't look like a |
|
1601 | 1601 | # symlink |
|
1602 | 1602 | sane = [] |
|
1603 | 1603 | for f in files: |
|
1604 | 1604 | if self.flags(f) == 'l': |
|
1605 | 1605 | d = self[f].data() |
|
1606 | 1606 | if d == '' or len(d) >= 1024 or '\n' in d or util.binary(d): |
|
1607 | 1607 | self._repo.ui.debug('ignoring suspect symlink placeholder' |
|
1608 | 1608 | ' "%s"\n' % f) |
|
1609 | 1609 | continue |
|
1610 | 1610 | sane.append(f) |
|
1611 | 1611 | return sane |
|
1612 | 1612 | |
|
1613 | 1613 | def _checklookup(self, files): |
|
1614 | 1614 | # check for any possibly clean files |
|
1615 | 1615 | if not files: |
|
1616 | 1616 | return [], [] |
|
1617 | 1617 | |
|
1618 | 1618 | modified = [] |
|
1619 | 1619 | fixup = [] |
|
1620 | 1620 | pctx = self._parents[0] |
|
1621 | 1621 | # do a full compare of any files that might have changed |
|
1622 | 1622 | for f in sorted(files): |
|
1623 | 1623 | if (f not in pctx or self.flags(f) != pctx.flags(f) |
|
1624 | 1624 | or pctx[f].cmp(self[f])): |
|
1625 | 1625 | modified.append(f) |
|
1626 | 1626 | else: |
|
1627 | 1627 | fixup.append(f) |
|
1628 | 1628 | |
|
1629 | 1629 | # update dirstate for files that are actually clean |
|
1630 | 1630 | if fixup: |
|
1631 | 1631 | try: |
|
1632 | 1632 | # updating the dirstate is optional |
|
1633 | 1633 | # so we don't wait on the lock |
|
1634 | 1634 | # wlock can invalidate the dirstate, so cache normal _after_ |
|
1635 | 1635 | # taking the lock |
|
1636 | 1636 | with self._repo.wlock(False): |
|
1637 | 1637 | normal = self._repo.dirstate.normal |
|
1638 | 1638 | for f in fixup: |
|
1639 | 1639 | normal(f) |
|
1640 | 1640 | # write changes out explicitly, because nesting |
|
1641 | 1641 | # wlock at runtime may prevent 'wlock.release()' |
|
1642 | 1642 | # after this block from doing so for subsequent |
|
1643 | 1643 | # changing files |
|
1644 | 1644 | self._repo.dirstate.write(self._repo.currenttransaction()) |
|
1645 | 1645 | except error.LockError: |
|
1646 | 1646 | pass |
|
1647 | 1647 | return modified, fixup |
|
1648 | 1648 | |
|
1649 | 1649 | def _dirstatestatus(self, match=None, ignored=False, clean=False, |
|
1650 | 1650 | unknown=False): |
|
1651 | 1651 | '''Gets the status from the dirstate -- internal use only.''' |
|
1652 | 1652 | listignored, listclean, listunknown = ignored, clean, unknown |
|
1653 | 1653 | match = match or matchmod.always(self._repo.root, self._repo.getcwd()) |
|
1654 | 1654 | subrepos = [] |
|
1655 | 1655 | if '.hgsub' in self: |
|
1656 | 1656 | subrepos = sorted(self.substate) |
|
1657 | 1657 | cmp, s = self._repo.dirstate.status(match, subrepos, listignored, |
|
1658 | 1658 | listclean, listunknown) |
|
1659 | 1659 | |
|
1660 | 1660 | # check for any possibly clean files |
|
1661 | 1661 | if cmp: |
|
1662 | 1662 | modified2, fixup = self._checklookup(cmp) |
|
1663 | 1663 | s.modified.extend(modified2) |
|
1664 | 1664 | |
|
1665 | 1665 | # update dirstate for files that are actually clean |
|
1666 | 1666 | if fixup and listclean: |
|
1667 | 1667 | s.clean.extend(fixup) |
|
1668 | 1668 | |
|
1669 | 1669 | if match.always(): |
|
1670 | 1670 | # cache for performance |
|
1671 | 1671 | if s.unknown or s.ignored or s.clean: |
|
1672 | 1672 | # "_status" is cached with list*=False in the normal route |
|
1673 | 1673 | self._status = scmutil.status(s.modified, s.added, s.removed, |
|
1674 | 1674 | s.deleted, [], [], []) |
|
1675 | 1675 | else: |
|
1676 | 1676 | self._status = s |
|
1677 | 1677 | |
|
1678 | 1678 | return s |
|
1679 | 1679 | |
|
1680 | 1680 | @propertycache |
|
1681 | 1681 | def _manifest(self): |
|
1682 | 1682 | """generate a manifest corresponding to the values in self._status |
|
1683 | 1683 | |
|
1684 | 1684 | This reuses the file nodeids from the parent, but we use special node

1685 | 1685 | identifiers for added and modified files. This is used by manifest

1686 | 1686 | merging to see that files are different and by the update logic to

1687 | 1687 | avoid deleting newly added files.
|
1688 | 1688 | """ |
|
1689 | 1689 | return self._buildstatusmanifest(self._status) |
|
1690 | 1690 | |
|
1691 | 1691 | def _buildstatusmanifest(self, status): |
|
1692 | 1692 | """Builds a manifest that includes the given status results.""" |
|
1693 | 1693 | parents = self.parents() |
|
1694 | 1694 | |
|
1695 | 1695 | man = parents[0].manifest().copy() |
|
1696 | 1696 | |
|
1697 | 1697 | ff = self._flagfunc |
|
1698 | 1698 | for i, l in ((addednodeid, status.added), |
|
1699 | 1699 | (modifiednodeid, status.modified)): |
|
1700 | 1700 | for f in l: |
|
1701 | 1701 | man[f] = i |
|
1702 | 1702 | try: |
|
1703 | 1703 | man.setflag(f, ff(f)) |
|
1704 | 1704 | except OSError: |
|
1705 | 1705 | pass |
|
1706 | 1706 | |
|
1707 | 1707 | for f in status.deleted + status.removed: |
|
1708 | 1708 | if f in man: |
|
1709 | 1709 | del man[f] |
|
1710 | 1710 | |
|
1711 | 1711 | return man |
|
1712 | 1712 | |
|
1713 | 1713 | def _buildstatus(self, other, s, match, listignored, listclean, |
|
1714 | 1714 | listunknown): |
|
1715 | 1715 | """build a status with respect to another context |
|
1716 | 1716 | |
|
1717 | 1717 | This includes logic for maintaining the fast path of status when

1718 | 1718 | comparing the working directory against its parent: building a new

1719 | 1719 | manifest is skipped when self (the working directory) is compared

1720 | 1720 | against its parent (repo['.']).
|
1721 | 1721 | """ |
|
1722 | 1722 | s = self._dirstatestatus(match, listignored, listclean, listunknown) |
|
1723 | 1723 | # Filter out symlinks that, in the case of FAT32 and NTFS filesystems, |
|
1724 | 1724 | # might have accidentally ended up with the entire contents of the file |
|
1725 | 1725 | # they are supposed to be linking to. |
|
1726 | 1726 | s.modified[:] = self._filtersuspectsymlink(s.modified) |
|
1727 | 1727 | if other != self._repo['.']: |
|
1728 | 1728 | s = super(workingctx, self)._buildstatus(other, s, match, |
|
1729 | 1729 | listignored, listclean, |
|
1730 | 1730 | listunknown) |
|
1731 | 1731 | return s |
|
1732 | 1732 | |
|
1733 | 1733 | def _matchstatus(self, other, match): |
|
1734 | 1734 | """override the match method with a filter for directory patterns |
|
1735 | 1735 | |
|
1736 | 1736 | We use inheritance to customize the match.bad method only in the case

1737 | 1737 | of workingctx, since it belongs only to the working directory when

1738 | 1738 | comparing against the parent changeset.
|
1739 | 1739 | |
|
1740 | 1740 | If we aren't comparing against the working directory's parent, then we |
|
1741 | 1741 | just use the default match object sent to us. |
|
1742 | 1742 | """ |
|
1743 | 1743 | superself = super(workingctx, self) |
|
1744 | 1744 | match = superself._matchstatus(other, match) |
|
1745 | 1745 | if other != self._repo['.']: |
|
1746 | 1746 | def bad(f, msg): |
|
1747 | 1747 | # 'f' may be a directory pattern from 'match.files()', |
|
1748 | 1748 | # so 'f not in ctx1' is not enough |
|
1749 | 1749 | if f not in other and not other.hasdir(f): |
|
1750 | 1750 | self._repo.ui.warn('%s: %s\n' % |
|
1751 | 1751 | (self._repo.dirstate.pathto(f), msg)) |
|
1752 | 1752 | match.bad = bad |
|
1753 | 1753 | return match |
|
1754 | 1754 | |
|
1755 | 1755 | class committablefilectx(basefilectx): |
|
1756 | 1756 | """A committablefilectx provides common functionality for a file context |
|
1757 | 1757 | that wants the ability to commit, e.g. workingfilectx or memfilectx.""" |
|
1758 | 1758 | def __init__(self, repo, path, filelog=None, ctx=None): |
|
1759 | 1759 | self._repo = repo |
|
1760 | 1760 | self._path = path |
|
1761 | 1761 | self._changeid = None |
|
1762 | 1762 | self._filerev = self._filenode = None |
|
1763 | 1763 | |
|
1764 | 1764 | if filelog is not None: |
|
1765 | 1765 | self._filelog = filelog |
|
1766 | 1766 | if ctx: |
|
1767 | 1767 | self._changectx = ctx |
|
1768 | 1768 | |
|
1769 | 1769 | def __nonzero__(self): |
|
1770 | 1770 | return True |
|
1771 | 1771 | |
|
1772 | 1772 | __bool__ = __nonzero__ |
|
1773 | 1773 | |
|
1774 | 1774 | def linkrev(self): |
|
1775 | 1775 | # linked to self._changectx no matter if file is modified or not |
|
1776 | 1776 | return self.rev() |
|
1777 | 1777 | |
|
1778 | 1778 | def parents(self): |
|
1779 | 1779 | '''return parent filectxs, following copies if necessary''' |
|
1780 | 1780 | def filenode(ctx, path): |
|
1781 | 1781 | return ctx._manifest.get(path, nullid) |
|
1782 | 1782 | |
|
1783 | 1783 | path = self._path |
|
1784 | 1784 | fl = self._filelog |
|
1785 | 1785 | pcl = self._changectx._parents |
|
1786 | 1786 | renamed = self.renamed() |
|
1787 | 1787 | |
|
1788 | 1788 | if renamed: |
|
1789 | 1789 | pl = [renamed + (None,)] |
|
1790 | 1790 | else: |
|
1791 | 1791 | pl = [(path, filenode(pcl[0], path), fl)] |
|
1792 | 1792 | |
|
1793 | 1793 | for pc in pcl[1:]: |
|
1794 | 1794 | pl.append((path, filenode(pc, path), fl)) |
|
1795 | 1795 | |
|
1796 | 1796 | return [self._parentfilectx(p, fileid=n, filelog=l) |
|
1797 | 1797 | for p, n, l in pl if n != nullid] |
|
1798 | 1798 | |
|
1799 | 1799 | def children(self): |
|
1800 | 1800 | return [] |
|
1801 | 1801 | |
|
1802 | 1802 | class workingfilectx(committablefilectx): |
|
1803 | 1803 | """A workingfilectx object makes access to data related to a particular |
|
1804 | 1804 | file in the working directory convenient.""" |
|
1805 | 1805 | def __init__(self, repo, path, filelog=None, workingctx=None): |
|
1806 | 1806 | super(workingfilectx, self).__init__(repo, path, filelog, workingctx) |
|
1807 | 1807 | |
|
1808 | 1808 | @propertycache |
|
1809 | 1809 | def _changectx(self): |
|
1810 | 1810 | return workingctx(self._repo) |
|
1811 | 1811 | |
|
1812 | 1812 | def data(self): |
|
1813 | 1813 | return self._repo.wread(self._path) |
|
1814 | 1814 | def renamed(self): |
|
1815 | 1815 | rp = self._repo.dirstate.copied(self._path) |
|
1816 | 1816 | if not rp: |
|
1817 | 1817 | return None |
|
1818 | 1818 | return rp, self._changectx._parents[0]._manifest.get(rp, nullid) |
|
1819 | 1819 | |
|
1820 | 1820 | def size(self): |
|
1821 | 1821 | return self._repo.wvfs.lstat(self._path).st_size |
|
1822 | 1822 | def date(self): |
|
1823 | 1823 | t, tz = self._changectx.date() |
|
1824 | 1824 | try: |
|
1825 | 1825 | return (self._repo.wvfs.lstat(self._path).st_mtime, tz) |
|
1826 | 1826 | except OSError as err: |
|
1827 | 1827 | if err.errno != errno.ENOENT: |
|
1828 | 1828 | raise |
|
1829 | 1829 | return (t, tz) |
|
1830 | 1830 | |
|
1831 | 1831 | def cmp(self, fctx): |
|
1832 | 1832 | """compare with other file context |
|
1833 | 1833 | |
|
1834 | 1834 | returns True if different from fctx.
|
1835 | 1835 | """ |
|
1836 | 1836 | # fctx should be a filectx (not a workingfilectx) |
|
1837 | 1837 | # invert comparison to reuse the same code path |
|
1838 | 1838 | return fctx.cmp(self) |
|
1839 | 1839 | |
|
1840 | 1840 | def remove(self, ignoremissing=False): |
|
1841 | 1841 | """wraps unlink for a repo's working directory""" |
|
1842 | 1842 | self._repo.wvfs.unlinkpath(self._path, ignoremissing=ignoremissing) |
|
1843 | 1843 | |
|
1844 | 1844 | def write(self, data, flags): |
|
1845 | 1845 | """wraps repo.wwrite""" |
|
1846 | 1846 | self._repo.wwrite(self._path, data, flags) |
|
1847 | 1847 | |
|
1848 | 1848 | class workingcommitctx(workingctx): |
|
1849 | 1849 | """A workingcommitctx object makes access to data related to |
|
1850 | 1850 | the revision being committed convenient. |
|
1851 | 1851 | |
|
1852 | 1852 | This hides changes in the working directory, if they aren't |
|
1853 | 1853 | committed in this context. |
|
1854 | 1854 | """ |
|
1855 | 1855 | def __init__(self, repo, changes, |
|
1856 | 1856 | text="", user=None, date=None, extra=None): |
|
1857 | 1857 | super(workingctx, self).__init__(repo, text, user, date, extra, |
|
1858 | 1858 | changes) |
|
1859 | 1859 | |
|
1860 | 1860 | def _dirstatestatus(self, match=None, ignored=False, clean=False, |
|
1861 | 1861 | unknown=False): |
|
1862 | 1862 | """Return matched files only in ``self._status`` |
|
1863 | 1863 | |
|
1864 | 1864 | Uncommitted files appear "clean" via this context, even if |
|
1865 | 1865 | they aren't actually so in the working directory. |
|
1866 | 1866 | """ |
|
1867 | 1867 | match = match or matchmod.always(self._repo.root, self._repo.getcwd()) |
|
1868 | 1868 | if clean: |
|
1869 | 1869 | clean = [f for f in self._manifest if f not in self._changedset] |
|
1870 | 1870 | else: |
|
1871 | 1871 | clean = [] |
|
1872 | 1872 | return scmutil.status([f for f in self._status.modified if match(f)], |
|
1873 | 1873 | [f for f in self._status.added if match(f)], |
|
1874 | 1874 | [f for f in self._status.removed if match(f)], |
|
1875 | 1875 | [], [], [], clean) |
|
1876 | 1876 | |
|
1877 | 1877 | @propertycache |
|
1878 | 1878 | def _changedset(self): |
|
1879 | 1879 | """Return the set of files changed in this context |
|
1880 | 1880 | """ |
|
1881 | 1881 | changed = set(self._status.modified) |
|
1882 | 1882 | changed.update(self._status.added) |
|
1883 | 1883 | changed.update(self._status.removed) |
|
1884 | 1884 | return changed |
|
1885 | 1885 | |
|
1886 | 1886 | def makecachingfilectxfn(func): |
|
1887 | 1887 | """Create a filectxfn that caches based on the path. |
|
1888 | 1888 | |
|
1889 | 1889 | We can't use util.cachefunc because it uses all arguments as the cache |
|
1890 | 1890 | key and this creates a cycle since the arguments include the repo and |
|
1891 | 1891 | memctx. |
|
1892 | 1892 | """ |
|
1893 | 1893 | cache = {} |
|
1894 | 1894 | |
|
1895 | 1895 | def getfilectx(repo, memctx, path): |
|
1896 | 1896 | if path not in cache: |
|
1897 | 1897 | cache[path] = func(repo, memctx, path) |
|
1898 | 1898 | return cache[path] |
|
1899 | 1899 | |
|
1900 | 1900 | return getfilectx |
|
1901 | 1901 | |
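
A small self-contained sketch of the caching behavior; the callback below is a stand-in rather than a real filectxfn (and note that memctx applies this wrapper automatically when handed a callable):

    calls = []
    def fn(repo, memctx, path):
        calls.append(path)
        return path.upper()          # stand-in return value

    cached = makecachingfilectxfn(fn)
    cached(None, None, 'a')
    cached(None, None, 'a')          # served from the cache, fn not re-run
    assert calls == ['a']
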
|
1902 | 1902 | class memctx(committablectx): |
|
1903 | 1903 | """Use memctx to perform in-memory commits via localrepo.commitctx(). |
|
1904 | 1904 | |
|
1905 | 1905 | Revision information is supplied at initialization time, while

1906 | 1906 | related file data is made available through a callback

1907 | 1907 | mechanism. 'repo' is the current localrepo, 'parents' is a

1908 | 1908 | sequence of two parent revision identifiers (pass None for every
|
1909 | 1909 | missing parent), 'text' is the commit message and 'files' lists |
|
1910 | 1910 | names of files touched by the revision (normalized and relative to |
|
1911 | 1911 | repository root). |
|
1912 | 1912 | |
|
1913 | 1913 | filectxfn(repo, memctx, path) is a callable receiving the |
|
1914 | 1914 | repository, the current memctx object and the normalized path of |
|
1915 | 1915 | requested file, relative to repository root. It is fired by the |
|
1916 | 1916 | commit function for every file in 'files', but calls order is |
|
1917 | 1917 | undefined. If the file is available in the revision being |
|
1918 | 1918 | committed (updated or added), filectxfn returns a memfilectx |
|
1919 | 1919 | object. If the file was removed, filectxfn returns None in recent
|
1920 | 1920 | Mercurial. Moved files are represented by marking the source file |
|
1921 | 1921 | removed and the new file added with copy information (see |
|
1922 | 1922 | memfilectx). |
|
1923 | 1923 | |
|
1924 | 1924 | user receives the committer name and defaults to current |
|
1925 | 1925 | repository username, date is the commit date in any format |
|
1926 | 1926 | supported by util.parsedate() and defaults to current date, extra |
|
1927 | 1927 | is a dictionary of metadata or is left empty. |
|
1928 | 1928 | """ |
|
1929 | 1929 | |
|
1930 | 1930 | # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files. |
|
1931 | 1931 | # Extensions that need to retain compatibility across Mercurial 3.1 can use |
|
1932 | 1932 | # this field to determine what to do in filectxfn. |
|
1933 | 1933 | _returnnoneformissingfiles = True |
|
1934 | 1934 | |
|
1935 | 1935 | def __init__(self, repo, parents, text, files, filectxfn, user=None, |
|
1936 | 1936 | date=None, extra=None, editor=False): |
|
1937 | 1937 | super(memctx, self).__init__(repo, text, user, date, extra) |
|
1938 | 1938 | self._rev = None |
|
1939 | 1939 | self._node = None |
|
1940 | 1940 | parents = [(p or nullid) for p in parents] |
|
1941 | 1941 | p1, p2 = parents |
|
1942 | 1942 | self._parents = [changectx(self._repo, p) for p in (p1, p2)] |
|
1943 | 1943 | files = sorted(set(files)) |
|
1944 | 1944 | self._files = files |
|
1945 | 1945 | self.substate = {} |
|
1946 | 1946 | |
|
1947 | 1947 | # if store is not callable, wrap it in a function |
|
1948 | 1948 | if not callable(filectxfn): |
|
1949 | 1949 | def getfilectx(repo, memctx, path): |
|
1950 | 1950 | fctx = filectxfn[path] |
|
1951 | 1951 | # this is weird but apparently we only keep track of one parent |
|
1952 | 1952 | # (why not only store that instead of a tuple?) |
|
1953 | 1953 | copied = fctx.renamed() |
|
1954 | 1954 | if copied: |
|
1955 | 1955 | copied = copied[0] |
|
1956 | 1956 | return memfilectx(repo, path, fctx.data(), |
|
1957 | 1957 | islink=fctx.islink(), isexec=fctx.isexec(), |
|
1958 | 1958 | copied=copied, memctx=memctx) |
|
1959 | 1959 | self._filectxfn = getfilectx |
|
1960 | 1960 | else: |
|
1961 | 1961 | # memoizing increases performance for e.g. vcs convert scenarios. |
|
1962 | 1962 | self._filectxfn = makecachingfilectxfn(filectxfn) |
|
1963 | 1963 | |
|
1964 | 1964 | if extra: |
|
1965 | 1965 | self._extra = extra.copy() |
|
1966 | 1966 | else: |
|
1967 | 1967 | self._extra = {} |
|
1968 | 1968 | |
|
1969 | 1969 | if self._extra.get('branch', '') == '': |
|
1970 | 1970 | self._extra['branch'] = 'default' |
|
1971 | 1971 | |
|
1972 | 1972 | if editor: |
|
1973 | 1973 | self._text = editor(self._repo, self, []) |
|
1974 | 1974 | self._repo.savecommitmessage(self._text) |
|
1975 | 1975 | |
|
1976 | 1976 | def filectx(self, path, filelog=None): |
|
1977 | 1977 | """get a file context from the working directory |
|
1978 | 1978 | |
|
1979 | 1979 | Returns None if file doesn't exist and should be removed.""" |
|
1980 | 1980 | return self._filectxfn(self._repo, self, path) |
|
1981 | 1981 | |
|
1982 | 1982 | def commit(self): |
|
1983 | 1983 | """commit context to the repo""" |
|
1984 | 1984 | return self._repo.commitctx(self) |
|
1985 | 1985 | |
|
1986 | 1986 | @propertycache |
|
1987 | 1987 | def _manifest(self): |
|
1988 | 1988 | """generate a manifest based on the return values of filectxfn""" |
|
1989 | 1989 | |
|
1990 | 1990 | # keep this simple for now; just worry about p1 |
|
1991 | 1991 | pctx = self._parents[0] |
|
1992 | 1992 | man = pctx.manifest().copy() |
|
1993 | 1993 | |
|
1994 | 1994 | for f in self._status.modified: |
|
1995 | 1995 | p1node = nullid |
|
1996 | 1996 | p2node = nullid |
|
1997 | 1997 | p = pctx[f].parents() # if file isn't in pctx, check p2? |
|
1998 | 1998 | if len(p) > 0: |
|
1999 | 1999 | p1node = p[0].filenode() |
|
2000 | 2000 | if len(p) > 1: |
|
2001 | 2001 | p2node = p[1].filenode() |
|
2002 | 2002 | man[f] = revlog.hash(self[f].data(), p1node, p2node) |
|
2003 | 2003 | |
|
2004 | 2004 | for f in self._status.added: |
|
2005 | 2005 | man[f] = revlog.hash(self[f].data(), nullid, nullid) |
|
2006 | 2006 | |
|
2007 | 2007 | for f in self._status.removed: |
|
2008 | 2008 | if f in man: |
|
2009 | 2009 | del man[f] |
|
2010 | 2010 | |
|
2011 | 2011 | return man |
|
2012 | 2012 | |
|
2013 | 2013 | @propertycache |
|
2014 | 2014 | def _status(self): |
|
2015 | 2015 | """Calculate exact status from ``files`` specified at construction |
|
2016 | 2016 | """ |
|
2017 | 2017 | man1 = self.p1().manifest() |
|
2018 | 2018 | p2 = self._parents[1] |
|
2019 | 2019 | # "1 < len(self._parents)" can't be used for checking |
|
2020 | 2020 | # existence of the 2nd parent, because "memctx._parents" is |
|
2021 | 2021 | # explicitly initialized with a list whose length is 2.
|
2022 | 2022 | if p2.node() != nullid: |
|
2023 | 2023 | man2 = p2.manifest() |
|
2024 | 2024 | managing = lambda f: f in man1 or f in man2 |
|
2025 | 2025 | else: |
|
2026 | 2026 | managing = lambda f: f in man1 |
|
2027 | 2027 | |
|
2028 | 2028 | modified, added, removed = [], [], [] |
|
2029 | 2029 | for f in self._files: |
|
2030 | 2030 | if not managing(f): |
|
2031 | 2031 | added.append(f) |
|
2032 | 2032 | elif self[f]: |
|
2033 | 2033 | modified.append(f) |
|
2034 | 2034 | else: |
|
2035 | 2035 | removed.append(f) |
|
2036 | 2036 | |
|
2037 | 2037 | return scmutil.status(modified, added, removed, [], [], [], []) |
|
2038 | 2038 | |
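
A hedged end-to-end sketch of an in-memory commit using the API above, assuming an open `repo` and a ui configured with a username; the file names and contents are made up and error handling is omitted (memfilectx is defined just below):

    from mercurial import context

    def getfilectx(repo, memctx, path):
        if path == 'gone.txt':
            return None                      # report the file as removed
        return context.memfilectx(repo, path, 'new contents\n',
                                  islink=False, isexec=False, memctx=memctx)

    p1 = repo['.'].node()
    mctx = context.memctx(repo, (p1, None), 'example commit message',
                          ['hello.txt', 'gone.txt'], getfilectx,
                          user='someone <someone@example.com>')
    node = mctx.commit()                     # same as repo.commitctx(mctx)
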
|
2039 | 2039 | class memfilectx(committablefilectx): |
|
2040 | 2040 | """memfilectx represents an in-memory file to commit. |
|
2041 | 2041 | |
|
2042 | 2042 | See memctx and committablefilectx for more details. |
|
2043 | 2043 | """ |
|
2044 | 2044 | def __init__(self, repo, path, data, islink=False, |
|
2045 | 2045 | isexec=False, copied=None, memctx=None): |
|
2046 | 2046 | """ |
|
2047 | 2047 | path is the normalized file path relative to repository root. |
|
2048 | 2048 | data is the file content as a string. |
|
2049 | 2049 | islink is True if the file is a symbolic link. |
|
2050 | 2050 | isexec is True if the file is executable. |
|
2051 | 2051 | copied is the source file path if current file was copied in the |
|
2052 | 2052 | revision being committed, or None.""" |
|
2053 | 2053 | super(memfilectx, self).__init__(repo, path, None, memctx) |
|
2054 | 2054 | self._data = data |
|
2055 | 2055 | self._flags = (islink and 'l' or '') + (isexec and 'x' or '') |
|
2056 | 2056 | self._copied = None |
|
2057 | 2057 | if copied: |
|
2058 | 2058 | self._copied = (copied, nullid) |
|
2059 | 2059 | |
|
2060 | 2060 | def data(self): |
|
2061 | 2061 | return self._data |
|
2062 | 2062 | def size(self): |
|
2063 | 2063 | return len(self.data()) |
|
2064 | 2064 | def flags(self): |
|
2065 | 2065 | return self._flags |
|
2066 | 2066 | def renamed(self): |
|
2067 | 2067 | return self._copied |
|
2068 | 2068 | |
|
2069 | 2069 | def remove(self, ignoremissing=False): |
|
2070 | 2070 | """wraps unlink for a repo's working directory""" |
|
2071 | 2071 | # need to figure out what to do here |
|
2072 | 2072 | del self._changectx[self._path] |
|
2073 | 2073 | |
|
2074 | 2074 | def write(self, data, flags): |
|
2075 | 2075 | """wraps repo.wwrite""" |
|
2076 | 2076 | self._data = data |
|
2077 | 2077 | |
|
2078 | 2078 | class metadataonlyctx(committablectx): |
|
2079 | 2079 | """Like memctx, but it reuses the manifest of a different commit.
|
2080 | 2080 | Intended to be used by lightweight operations that are creating |
|
2081 | 2081 | metadata-only changes. |
|
2082 | 2082 | |
|
2083 | 2083 | Revision information is supplied at initialization time. 'repo' is the |
|
2084 | 2084 | current localrepo, 'ctx' is the original revision whose manifest we're reusing,

2085 | 2085 | 'parents' is a sequence of two parent revision identifiers (pass None for

2086 | 2086 | every missing parent), 'text' is the commit message.
|
2087 | 2087 | |
|
2088 | 2088 | user receives the committer name and defaults to current repository |
|
2089 | 2089 | username, date is the commit date in any format supported by |
|
2090 | 2090 | util.parsedate() and defaults to current date, extra is a dictionary of |
|
2091 | 2091 | metadata or is left empty. |
|
2092 | 2092 | """ |
|
2093 | 2093 | def __new__(cls, repo, originalctx, *args, **kwargs): |
|
2094 | 2094 | return super(metadataonlyctx, cls).__new__(cls, repo) |
|
2095 | 2095 | |
|
2096 | 2096 | def __init__(self, repo, originalctx, parents, text, user=None, date=None, |
|
2097 | 2097 | extra=None, editor=False): |
|
2098 | 2098 | super(metadataonlyctx, self).__init__(repo, text, user, date, extra) |
|
2099 | 2099 | self._rev = None |
|
2100 | 2100 | self._node = None |
|
2101 | 2101 | self._originalctx = originalctx |
|
2102 | 2102 | self._manifestnode = originalctx.manifestnode() |
|
2103 | 2103 | parents = [(p or nullid) for p in parents] |
|
2104 | 2104 | p1, p2 = self._parents = [changectx(self._repo, p) for p in parents] |
|
2105 | 2105 | |
|
2106 | 2106 | # sanity check to ensure that the reused manifest parents are |
|
2107 | 2107 | # manifests of our commit parents |
|
2108 | 2108 | mp1, mp2 = self.manifestctx().parents |
|
2109 | 2109 | if p1 != nullid and p1.manifestnode() != mp1: |
|
2110 | 2110 | raise RuntimeError('can\'t reuse the manifest: ' |
|
2111 | 2111 | 'its p1 doesn\'t match the new ctx p1') |
|
2112 | 2112 | if p2 != nullid and p2.manifestnode() != mp2: |
|
2113 | 2113 | raise RuntimeError('can\'t reuse the manifest: ' |
|
2114 | 2114 | 'its p2 doesn\'t match the new ctx p2') |
|
2115 | 2115 | |
|
2116 | 2116 | self._files = originalctx.files() |
|
2117 | 2117 | self.substate = {} |
|
2118 | 2118 | |
|
2119 | 2119 | if extra: |
|
2120 | 2120 | self._extra = extra.copy() |
|
2121 | 2121 | else: |
|
2122 | 2122 | self._extra = {} |
|
2123 | 2123 | |
|
2124 | 2124 | if self._extra.get('branch', '') == '': |
|
2125 | 2125 | self._extra['branch'] = 'default' |
|
2126 | 2126 | |
|
2127 | 2127 | if editor: |
|
2128 | 2128 | self._text = editor(self._repo, self, []) |
|
2129 | 2129 | self._repo.savecommitmessage(self._text) |
|
2130 | 2130 | |
|
2131 | 2131 | def manifestnode(self): |
|
2132 | 2132 | return self._manifestnode |
|
2133 | 2133 | |
|
2134 | 2134 | @propertycache |
|
2135 | 2135 | def _manifestctx(self): |
|
2136 | 2136 | return self._repo.manifestlog[self._manifestnode] |
|
2137 | 2137 | |
|
2138 | 2138 | def filectx(self, path, filelog=None): |
|
2139 | 2139 | return self._originalctx.filectx(path, filelog=filelog) |
|
2140 | 2140 | |
|
2141 | 2141 | def commit(self): |
|
2142 | 2142 | """commit context to the repo""" |
|
2143 | 2143 | return self._repo.commitctx(self) |
|
2144 | 2144 | |
|
2145 | 2145 | @property |
|
2146 | 2146 | def _manifest(self): |
|
2147 | 2147 | return self._originalctx.manifest() |
|
2148 | 2148 | |
|
2149 | 2149 | @propertycache |
|
2150 | 2150 | def _status(self): |
|
2151 | 2151 | """Calculate exact status from ``files`` specified in the ``origctx`` |
|
2152 | 2152 | and the parents' manifests.
|
2153 | 2153 | """ |
|
2154 | 2154 | man1 = self.p1().manifest() |
|
2155 | 2155 | p2 = self._parents[1] |
|
2156 | 2156 | # "1 < len(self._parents)" can't be used for checking |
|
2157 | 2157 | # existence of the 2nd parent, because "metadataonlyctx._parents" is |
|
2158 | 2158 | # explicitly initialized with a list whose length is 2.
|
2159 | 2159 | if p2.node() != nullid: |
|
2160 | 2160 | man2 = p2.manifest() |
|
2161 | 2161 | managing = lambda f: f in man1 or f in man2 |
|
2162 | 2162 | else: |
|
2163 | 2163 | managing = lambda f: f in man1 |
|
2164 | 2164 | |
|
2165 | 2165 | modified, added, removed = [], [], [] |
|
2166 | 2166 | for f in self._files: |
|
2167 | 2167 | if not managing(f): |
|
2168 | 2168 | added.append(f) |
|
2169 | 2169 | elif self[f]: |
|
2170 | 2170 | modified.append(f) |
|
2171 | 2171 | else: |
|
2172 | 2172 | removed.append(f) |
|
2173 | 2173 | |
|
2174 | 2174 | return scmutil.status(modified, added, removed, [], [], [], []) |
@@ -1,1066 +1,1066 b'' | |||
|
1 | 1 | # smartset.py - data structure for revision set |
|
2 | 2 | # |
|
3 | 3 | # Copyright 2010 Matt Mackall <mpm@selenic.com> |
|
4 | 4 | # |
|
5 | 5 | # This software may be used and distributed according to the terms of the |
|
6 | 6 | # GNU General Public License version 2 or any later version. |
|
7 | 7 | |
|
8 | 8 | from __future__ import absolute_import |
|
9 | 9 | |
|
10 | 10 | from . import ( |
|
11 | 11 | util, |
|
12 | 12 | ) |
|
13 | 13 | |
|
14 | 14 | def _formatsetrepr(r): |
|
15 | 15 | """Format an optional printable representation of a set |
|
16 | 16 | |
|
17 | 17 | ======== ================================= |
|
18 | 18 | type(r) example |
|
19 | 19 | ======== ================================= |
|
20 | 20 | tuple ('<not %r>', other) |
|
21 | 21 | str '<branch closed>' |
|
22 | 22 | callable lambda: '<branch %r>' % sorted(b) |
|
23 | 23 | object other |
|
24 | 24 | ======== ================================= |
|
25 | 25 | """ |
|
26 | 26 | if r is None: |
|
27 | 27 | return '' |
|
28 | 28 | elif isinstance(r, tuple): |
|
29 | 29 | return r[0] % r[1:] |
|
30 | 30 | elif isinstance(r, str): |
|
31 | 31 | return r |
|
32 | 32 | elif callable(r): |
|
33 | 33 | return r() |
|
34 | 34 | else: |
|
35 | 35 | return repr(r) |
|
36 | 36 | |
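
The dispatch table above maps directly to behavior; a few doctest-style checks (illustrative only):

    >>> _formatsetrepr(None)
    ''
    >>> _formatsetrepr(('<not %r>', [1, 2]))
    '<not [1, 2]>'
    >>> _formatsetrepr('<branch closed>')
    '<branch closed>'
    >>> _formatsetrepr(lambda: '<computed>')
    '<computed>'
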
|
37 | 37 | class abstractsmartset(object): |
|
38 | 38 | |
|
39 | 39 | def __nonzero__(self): |
|
40 | 40 | """True if the smartset is not empty""" |
|
41 | 41 | raise NotImplementedError() |
|
42 | 42 | |
|
43 | 43 | __bool__ = __nonzero__ |
|
44 | 44 | |
|
45 | 45 | def __contains__(self, rev): |
|
46 | 46 | """provide fast membership testing""" |
|
47 | 47 | raise NotImplementedError() |
|
48 | 48 | |
|
49 | 49 | def __iter__(self): |
|
50 | 50 | """iterate the set in the order it is supposed to be iterated""" |
|
51 | 51 | raise NotImplementedError() |
|
52 | 52 | |
|
53 | 53 | # Attributes containing a function to perform a fast iteration in a given |
|
54 | 54 | # direction. A smartset can have none, one, or both defined. |
|
55 | 55 | # |
|
56 | 56 | # Default value is None instead of a function returning None to avoid |
|
57 | 57 | # initializing an iterator just for testing if a fast method exists. |
|
58 | 58 | fastasc = None |
|
59 | 59 | fastdesc = None |
|
60 | 60 | |
|
61 | 61 | def isascending(self): |
|
62 | 62 | """True if the set will iterate in ascending order""" |
|
63 | 63 | raise NotImplementedError() |
|
64 | 64 | |
|
65 | 65 | def isdescending(self): |
|
66 | 66 | """True if the set will iterate in descending order""" |
|
67 | 67 | raise NotImplementedError() |
|
68 | 68 | |
|
69 | 69 | def istopo(self): |
|
70 | 70 | """True if the set will iterate in topological order"""
|
71 | 71 | raise NotImplementedError() |
|
72 | 72 | |
|
73 | 73 | def min(self): |
|
74 | 74 | """return the minimum element in the set""" |
|
75 | 75 | if self.fastasc is None: |
|
76 | 76 | v = min(self) |
|
77 | 77 | else: |
|
78 | 78 | for v in self.fastasc(): |
|
79 | 79 | break |
|
80 | 80 | else: |
|
81 | 81 | raise ValueError('arg is an empty sequence') |
|
82 | 82 | self.min = lambda: v |
|
83 | 83 | return v |
|
84 | 84 | |
|
85 | 85 | def max(self): |
|
86 | 86 | """return the maximum element in the set""" |
|
87 | 87 | if self.fastdesc is None: |
|
88 | 88 | return max(self) |
|
89 | 89 | else: |
|
90 | 90 | for v in self.fastdesc(): |
|
91 | 91 | break |
|
92 | 92 | else: |
|
93 | 93 | raise ValueError('arg is an empty sequence') |
|
94 | 94 | self.max = lambda: v |
|
95 | 95 | return v |
|
96 | 96 | |
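
Both min() and max() above memoize their result by shadowing the bound method with a per-instance closure, so the scan happens at most once per set. A generic sketch of the same trick, independent of smartsets:

    class Expensive(object):
        def compute(self):
            value = sum(range(10 ** 6))   # stand-in for a costly scan
            # the instance attribute now shadows the class method
            self.compute = lambda: value
            return value

    e = Expensive()
    e.compute()   # does the work once
    e.compute()   # returns the cached value immediately
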
|
97 | 97 | def first(self): |
|
98 | 98 | """return the first element in the set (user iteration perspective) |
|
99 | 99 | |
|
100 | 100 | Return None if the set is empty""" |
|
101 | 101 | raise NotImplementedError() |
|
102 | 102 | |
|
103 | 103 | def last(self): |
|
104 | 104 | """return the last element in the set (user iteration perspective) |
|
105 | 105 | |
|
106 | 106 | Return None if the set is empty""" |
|
107 | 107 | raise NotImplementedError() |
|
108 | 108 | |
|
109 | 109 | def __len__(self): |
|
110 | 110 | """return the length of the smartset

111 | 111 |

112 | 112 | This can be expensive on a smartset that could otherwise be lazy."""
|
113 | 113 | raise NotImplementedError() |
|
114 | 114 | |
|
115 | 115 | def reverse(self): |
|
116 | 116 | """reverse the expected iteration order""" |
|
117 | 117 | raise NotImplementedError() |
|
118 | 118 | |
|
119 | 119 | def sort(self, reverse=True): |
|
120 | 120 | """get the set to iterate in an ascending or descending order""" |
|
121 | 121 | raise NotImplementedError() |
|
122 | 122 | |
|
123 | 123 | def __and__(self, other): |
|
124 | 124 | """Returns a new object with the intersection of the two collections. |
|
125 | 125 | |
|
126 | 126 | This is part of the mandatory API for smartset.""" |
|
127 | 127 | if isinstance(other, fullreposet): |
|
128 | 128 | return self |
|
129 | 129 | return self.filter(other.__contains__, condrepr=other, cache=False) |
|
130 | 130 | |
|
131 | 131 | def __add__(self, other): |
|
132 | 132 | """Returns a new object with the union of the two collections. |
|
133 | 133 | |
|
134 | 134 | This is part of the mandatory API for smartset.""" |
|
135 | 135 | return addset(self, other) |
|
136 | 136 | |
|
137 | 137 | def __sub__(self, other): |
|
138 | 138 | """Returns a new object with the subtraction of the two collections.
|
139 | 139 | |
|
140 | 140 | This is part of the mandatory API for smartset.""" |
|
141 | 141 | c = other.__contains__ |
|
142 | 142 | return self.filter(lambda r: not c(r), condrepr=('<not %r>', other), |
|
143 | 143 | cache=False) |
|
144 | 144 | |
|
145 | 145 | def filter(self, condition, condrepr=None, cache=True): |
|
146 | 146 | """Returns this smartset filtered by condition as a new smartset. |
|
147 | 147 | |
|
148 | 148 | `condition` is a callable which takes a revision number and returns a |
|
149 | 149 | boolean. Optional `condrepr` provides a printable representation of |
|
150 | 150 | the given `condition`. |
|
151 | 151 | |
|
152 | 152 | This is part of the mandatory API for smartset.""" |
|
153 | 153 | # builtins cannot be cached, but they do not need to be
|
154 | 154 | if cache and util.safehasattr(condition, 'func_code'): |
|
155 | 155 | condition = util.cachefunc(condition) |
|
156 | 156 | return filteredset(self, condition, condrepr) |
|
157 | 157 | |
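
For example (doctest-style, matching this module's conventions), filtering wraps the set lazily in a filteredset rather than computing a new list:

    >>> s = baseset([1, 2, 3, 4]).filter(lambda r: r % 2)
    >>> type(s).__name__
    'filteredset'
    >>> list(s)
    [1, 3]
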
|
158 | 158 | class baseset(abstractsmartset): |
|
159 | 159 | """Basic data structure that represents a revset and contains the basic |
|
160 | 160 | operation that it should be able to perform. |
|
161 | 161 | |
|
162 | 162 | Every method in this class should be implemented by any smartset class. |
|
163 | 163 | |
|
164 | 164 | This class could be constructed by an (unordered) set, or an (ordered) |
|
165 | 165 | list-like object. If a set is provided, it'll be sorted lazily. |
|
166 | 166 | |
|
167 | 167 | >>> x = [4, 0, 7, 6] |
|
168 | 168 | >>> y = [5, 6, 7, 3] |
|
169 | 169 | |
|
170 | 170 | Construct by a set: |
|
171 | 171 | >>> xs = baseset(set(x)) |
|
172 | 172 | >>> ys = baseset(set(y)) |
|
173 | 173 | >>> [list(i) for i in [xs + ys, xs & ys, xs - ys]] |
|
174 | 174 | [[0, 4, 6, 7, 3, 5], [6, 7], [0, 4]] |
|
175 | 175 | >>> [type(i).__name__ for i in [xs + ys, xs & ys, xs - ys]] |
|
176 | 176 | ['addset', 'baseset', 'baseset'] |
|
177 | 177 | |
|
178 | 178 | Construct by a list-like: |
|
179 | 179 | >>> xs = baseset(x) |
|
180 | 180 | >>> ys = baseset(i for i in y) |
|
181 | 181 | >>> [list(i) for i in [xs + ys, xs & ys, xs - ys]] |
|
182 | 182 | [[4, 0, 7, 6, 5, 3], [7, 6], [4, 0]] |
|
183 | 183 | >>> [type(i).__name__ for i in [xs + ys, xs & ys, xs - ys]] |
|
184 | 184 | ['addset', 'filteredset', 'filteredset'] |
|
185 | 185 | |
|
186 | 186 | Populate "_set" fields in the lists so set optimization may be used: |
|
187 | 187 | >>> [1 in xs, 3 in ys] |
|
188 | 188 | [False, True] |
|
189 | 189 | |
|
190 | 190 | Without sort(), results won't be changed: |
|
191 | 191 | >>> [list(i) for i in [xs + ys, xs & ys, xs - ys]] |
|
192 | 192 | [[4, 0, 7, 6, 5, 3], [7, 6], [4, 0]] |
|
193 | 193 | >>> [type(i).__name__ for i in [xs + ys, xs & ys, xs - ys]] |
|
194 | 194 | ['addset', 'filteredset', 'filteredset'] |
|
195 | 195 | |
|
196 | 196 | With sort(), set optimization could be used: |
|
197 | 197 | >>> xs.sort(reverse=True) |
|
198 | 198 | >>> [list(i) for i in [xs + ys, xs & ys, xs - ys]] |
|
199 | 199 | [[7, 6, 4, 0, 5, 3], [7, 6], [4, 0]] |
|
200 | 200 | >>> [type(i).__name__ for i in [xs + ys, xs & ys, xs - ys]] |
|
201 | 201 | ['addset', 'baseset', 'baseset'] |
|
202 | 202 | |
|
203 | 203 | >>> ys.sort() |
|
204 | 204 | >>> [list(i) for i in [xs + ys, xs & ys, xs - ys]] |
|
205 | 205 | [[7, 6, 4, 0, 3, 5], [7, 6], [4, 0]] |
|
206 | 206 | >>> [type(i).__name__ for i in [xs + ys, xs & ys, xs - ys]] |
|
207 | 207 | ['addset', 'baseset', 'baseset'] |
|
208 | 208 | |
|
209 | 209 | istopo is preserved across set operations |
|
210 | 210 | >>> xs = baseset(set(x), istopo=True) |
|
211 | 211 | >>> rs = xs & ys |
|
212 | 212 | >>> type(rs).__name__ |
|
213 | 213 | 'baseset' |
|
214 | 214 | >>> rs._istopo |
|
215 | 215 | True |
|
216 | 216 | """ |
|
217 | 217 | def __init__(self, data=(), datarepr=None, istopo=False): |
|
218 | 218 | """ |
|
219 | 219 | datarepr: a tuple of (format, obj, ...), a function or an object that |
|
220 | 220 | provides a printable representation of the given data. |
|
221 | 221 | """ |
|
222 | 222 | self._ascending = None |
|
223 | 223 | self._istopo = istopo |
|
224 | 224 | if isinstance(data, set): |
|
225 | 225 | # converting set to list has a cost, do it lazily |
|
226 | 226 | self._set = data |
|
227 | 227 | # a set has no order; we pick one for stability purposes
|
228 | 228 | self._ascending = True |
|
229 | 229 | else: |
|
230 | 230 | if not isinstance(data, list): |
|
231 | 231 | data = list(data) |
|
232 | 232 | self._list = data |
|
233 | 233 | self._datarepr = datarepr |
|
234 | 234 | |
|
235 | 235 | @util.propertycache |
|
236 | 236 | def _set(self): |
|
237 | 237 | return set(self._list) |
|
238 | 238 | |
|
239 | 239 | @util.propertycache |
|
240 | 240 | def _asclist(self): |
|
241 | 241 | asclist = self._list[:] |
|
242 | 242 | asclist.sort() |
|
243 | 243 | return asclist |
|
244 | 244 | |
|
245 | 245 | @util.propertycache |
|
246 | 246 | def _list(self): |
|
247 | 247 | # _list is only lazily constructed if we have _set |
|
248 | assert '_set' in self.__dict__ | |
|
248 | assert r'_set' in self.__dict__ | |
|
249 | 249 | return list(self._set) |
|
250 | 250 | |
|
251 | 251 | def __iter__(self): |
|
252 | 252 | if self._ascending is None: |
|
253 | 253 | return iter(self._list) |
|
254 | 254 | elif self._ascending: |
|
255 | 255 | return iter(self._asclist) |
|
256 | 256 | else: |
|
257 | 257 | return reversed(self._asclist) |
|
258 | 258 | |
|
259 | 259 | def fastasc(self): |
|
260 | 260 | return iter(self._asclist) |
|
261 | 261 | |
|
262 | 262 | def fastdesc(self): |
|
263 | 263 | return reversed(self._asclist) |
|
264 | 264 | |
|
265 | 265 | @util.propertycache |
|
266 | 266 | def __contains__(self): |
|
267 | 267 | return self._set.__contains__ |
|
268 | 268 | |
|
269 | 269 | def __nonzero__(self): |
|
270 | 270 | return bool(len(self)) |
|
271 | 271 | |
|
272 | 272 | __bool__ = __nonzero__ |
|
273 | 273 | |
|
274 | 274 | def sort(self, reverse=False): |
|
275 | 275 | self._ascending = not bool(reverse) |
|
276 | 276 | self._istopo = False |
|
277 | 277 | |
|
278 | 278 | def reverse(self): |
|
279 | 279 | if self._ascending is None: |
|
280 | 280 | self._list.reverse() |
|
281 | 281 | else: |
|
282 | 282 | self._ascending = not self._ascending |
|
283 | 283 | self._istopo = False |
|
284 | 284 | |
|
285 | 285 | def __len__(self): |
|
286 | 286 | if '_list' in self.__dict__: |
|
287 | 287 | return len(self._list) |
|
288 | 288 | else: |
|
289 | 289 | return len(self._set) |
|
290 | 290 | |
|
291 | 291 | def isascending(self): |
|
292 | 292 | """Returns True if the collection is in ascending order, False if not.
|
293 | 293 | |
|
294 | 294 | This is part of the mandatory API for smartset.""" |
|
295 | 295 | if len(self) <= 1: |
|
296 | 296 | return True |
|
297 | 297 | return self._ascending is not None and self._ascending |
|
298 | 298 | |
|
299 | 299 | def isdescending(self): |
|
300 | 300 | """Returns True if the collection is in descending order, False if not.
|
301 | 301 | |
|
302 | 302 | This is part of the mandatory API for smartset.""" |
|
303 | 303 | if len(self) <= 1: |
|
304 | 304 | return True |
|
305 | 305 | return self._ascending is not None and not self._ascending |
|
306 | 306 | |
|
307 | 307 | def istopo(self): |
|
308 | 308 | """Returns True if the collection is in topological order, False if not.
|
309 | 309 | |
|
310 | 310 | This is part of the mandatory API for smartset.""" |
|
311 | 311 | if len(self) <= 1: |
|
312 | 312 | return True |
|
313 | 313 | return self._istopo |
|
314 | 314 | |
|
315 | 315 | def first(self): |
|
316 | 316 | if self: |
|
317 | 317 | if self._ascending is None: |
|
318 | 318 | return self._list[0] |
|
319 | 319 | elif self._ascending: |
|
320 | 320 | return self._asclist[0] |
|
321 | 321 | else: |
|
322 | 322 | return self._asclist[-1] |
|
323 | 323 | return None |
|
324 | 324 | |
|
325 | 325 | def last(self): |
|
326 | 326 | if self: |
|
327 | 327 | if self._ascending is None: |
|
328 | 328 | return self._list[-1] |
|
329 | 329 | elif self._ascending: |
|
330 | 330 | return self._asclist[-1] |
|
331 | 331 | else: |
|
332 | 332 | return self._asclist[0] |
|
333 | 333 | return None |
|
334 | 334 | |
|
335 | 335 | def _fastsetop(self, other, op): |
|
336 | 336 | # try to use native set operations as fast paths |
|
337 | 337 | if (type(other) is baseset and '_set' in other.__dict__ and '_set' in |
|
338 | 338 | self.__dict__ and self._ascending is not None): |
|
339 | 339 | s = baseset(data=getattr(self._set, op)(other._set), |
|
340 | 340 | istopo=self._istopo) |
|
341 | 341 | s._ascending = self._ascending |
|
342 | 342 | else: |
|
343 | 343 | s = getattr(super(baseset, self), op)(other) |
|
344 | 344 | return s |
|
345 | 345 | |
|
346 | 346 | def __and__(self, other): |
|
347 | 347 | return self._fastsetop(other, '__and__') |
|
348 | 348 | |
|
349 | 349 | def __sub__(self, other): |
|
350 | 350 | return self._fastsetop(other, '__sub__') |
|
351 | 351 | |
|
352 | 352 | def __repr__(self): |
|
353 | 353 | d = {None: '', False: '-', True: '+'}[self._ascending] |
|
354 | 354 | s = _formatsetrepr(self._datarepr) |
|
355 | 355 | if not s: |
|
356 | 356 | l = self._list |
|
357 | 357 | # if _list has been built from a set, it might have a different |
|
358 | 358 | # order from one python implementation to another. |
|
359 | 359 | # We fall back to the sorted version for stable output.
|
360 | 360 | if self._ascending is not None: |
|
361 | 361 | l = self._asclist |
|
362 | 362 | s = repr(l) |
|
363 | 363 | return '<%s%s %s>' % (type(self).__name__, d, s) |
|
364 | 364 | |
|
365 | 365 | class filteredset(abstractsmartset): |
|
366 | 366 | """Duck type for the baseset class which iterates lazily over the revisions

367 | 367 | in the subset and contains a function which tests for membership in the

368 | 368 | revset.
|
369 | 369 | """ |
|
370 | 370 | def __init__(self, subset, condition=lambda x: True, condrepr=None): |
|
371 | 371 | """ |
|
372 | 372 | condition: a function that decides whether a revision in the subset
|
373 | 373 | belongs to the revset or not. |
|
374 | 374 | condrepr: a tuple of (format, obj, ...), a function or an object that |
|
375 | 375 | provides a printable representation of the given condition. |
|
376 | 376 | """ |
|
377 | 377 | self._subset = subset |
|
378 | 378 | self._condition = condition |
|
379 | 379 | self._condrepr = condrepr |
|
380 | 380 | |
|
381 | 381 | def __contains__(self, x): |
|
382 | 382 | return x in self._subset and self._condition(x) |
|
383 | 383 | |
|
384 | 384 | def __iter__(self): |
|
385 | 385 | return self._iterfilter(self._subset) |
|
386 | 386 | |
|
387 | 387 | def _iterfilter(self, it): |
|
388 | 388 | cond = self._condition |
|
389 | 389 | for x in it: |
|
390 | 390 | if cond(x): |
|
391 | 391 | yield x |
|
392 | 392 | |
|
393 | 393 | @property |
|
394 | 394 | def fastasc(self): |
|
395 | 395 | it = self._subset.fastasc |
|
396 | 396 | if it is None: |
|
397 | 397 | return None |
|
398 | 398 | return lambda: self._iterfilter(it()) |
|
399 | 399 | |
|
400 | 400 | @property |
|
401 | 401 | def fastdesc(self): |
|
402 | 402 | it = self._subset.fastdesc |
|
403 | 403 | if it is None: |
|
404 | 404 | return None |
|
405 | 405 | return lambda: self._iterfilter(it()) |
|
406 | 406 | |
|
407 | 407 | def __nonzero__(self): |
|
408 | 408 | fast = None |
|
409 | 409 | candidates = [self.fastasc if self.isascending() else None, |
|
410 | 410 | self.fastdesc if self.isdescending() else None, |
|
411 | 411 | self.fastasc, |
|
412 | 412 | self.fastdesc] |
|
413 | 413 | for candidate in candidates: |
|
414 | 414 | if candidate is not None: |
|
415 | 415 | fast = candidate |
|
416 | 416 | break |
|
417 | 417 | |
|
418 | 418 | if fast is not None: |
|
419 | 419 | it = fast() |
|
420 | 420 | else: |
|
421 | 421 | it = self |
|
422 | 422 | |
|
423 | 423 | for r in it: |
|
424 | 424 | return True |
|
425 | 425 | return False |
|
426 | 426 | |
|
427 | 427 | __bool__ = __nonzero__ |
|
428 | 428 | |
|
429 | 429 | def __len__(self): |
|
430 | 430 | # Basic implementation to be changed in future patches. |
|
431 | 431 | # Until this gets improved, we use a generator expression

432 | 432 | # here, since list comprehensions are free to call __len__ again,

433 | 433 | # causing infinite recursion.
|
434 | 434 | l = baseset(r for r in self) |
|
435 | 435 | return len(l) |
|
436 | 436 | |
|
437 | 437 | def sort(self, reverse=False): |
|
438 | 438 | self._subset.sort(reverse=reverse) |
|
439 | 439 | |
|
440 | 440 | def reverse(self): |
|
441 | 441 | self._subset.reverse() |
|
442 | 442 | |
|
443 | 443 | def isascending(self): |
|
444 | 444 | return self._subset.isascending() |
|
445 | 445 | |
|
446 | 446 | def isdescending(self): |
|
447 | 447 | return self._subset.isdescending() |
|
448 | 448 | |
|
449 | 449 | def istopo(self): |
|
450 | 450 | return self._subset.istopo() |
|
451 | 451 | |
|
452 | 452 | def first(self): |
|
453 | 453 | for x in self: |
|
454 | 454 | return x |
|
455 | 455 | return None |
|
456 | 456 | |
|
457 | 457 | def last(self): |
|
458 | 458 | it = None |
|
459 | 459 | if self.isascending(): |
|
460 | 460 | it = self.fastdesc |
|
461 | 461 | elif self.isdescending(): |
|
462 | 462 | it = self.fastasc |
|
463 | 463 | if it is not None: |
|
464 | 464 | for x in it(): |
|
465 | 465 | return x |
|
466 | 466 | return None # empty case
|
467 | 467 | else: |
|
468 | 468 | x = None |
|
469 | 469 | for x in self: |
|
470 | 470 | pass |
|
471 | 471 | return x |
|
472 | 472 | |
|
473 | 473 | def __repr__(self): |
|
474 | 474 | xs = [repr(self._subset)] |
|
475 | 475 | s = _formatsetrepr(self._condrepr) |
|
476 | 476 | if s: |
|
477 | 477 | xs.append(s) |
|
478 | 478 | return '<%s %s>' % (type(self).__name__, ', '.join(xs)) |
|
479 | 479 | |
|
480 | 480 | def _iterordered(ascending, iter1, iter2): |
|
481 | 481 | """produce an ordered iteration from two iterators with the same order |
|
482 | 482 | |
|
483 | 483 | The ascending flag indicates the iteration direction.
|
484 | 484 | """ |
|
485 | 485 | choice = max |
|
486 | 486 | if ascending: |
|
487 | 487 | choice = min |
|
488 | 488 | |
|
489 | 489 | val1 = None |
|
490 | 490 | val2 = None |
|
491 | 491 | try: |
|
492 | 492 | # Consume both iterators in an ordered way until one is empty |
|
493 | 493 | while True: |
|
494 | 494 | if val1 is None: |
|
495 | 495 | val1 = next(iter1) |
|
496 | 496 | if val2 is None: |
|
497 | 497 | val2 = next(iter2) |
|
498 | 498 | n = choice(val1, val2) |
|
499 | 499 | yield n |
|
500 | 500 | if val1 == n: |
|
501 | 501 | val1 = None |
|
502 | 502 | if val2 == n: |
|
503 | 503 | val2 = None |
|
504 | 504 | except StopIteration: |
|
505 | 505 | # Flush any remaining values and consume the other one |
|
506 | 506 | it = iter2 |
|
507 | 507 | if val1 is not None: |
|
508 | 508 | yield val1 |
|
509 | 509 | it = iter1 |
|
510 | 510 | elif val2 is not None: |
|
511 | 511 | # val2 may also be None if the last values were equal and both iterators are empty
|
512 | 512 | yield val2 |
|
513 | 513 | for val in it: |
|
514 | 514 | yield val |
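
Doctest-style illustration (same conventions as the addset docstring below)
of how _iterordered behaves: two iterators sharing a direction are merged
into an ordered union, and a value present on both sides is yielded once:

    >>> list(_iterordered(True, iter([0, 2, 3]), iter([2, 4, 5])))
    [0, 2, 3, 4, 5]
    >>> list(_iterordered(False, iter([5, 3]), iter([4, 3, 0])))
    [5, 4, 3, 0]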
|
515 | 515 | |
|
516 | 516 | class addset(abstractsmartset): |
|
517 | 517 | """Represent the addition of two sets |
|
518 | 518 | |
|
519 | 519 | Wrapper structure for lazily adding two structures without losing much |
|
520 | 520 | performance on the __contains__ method |
|
521 | 521 | |
|
522 | 522 | If the ascending attribute is set, that means the two structures are |
|
523 | 523 | ordered in either an ascending or descending way. Therefore, we can add |
|
524 | 524 | them maintaining the order by iterating over both at the same time |
|
525 | 525 | |
|
526 | 526 | >>> xs = baseset([0, 3, 2]) |
|
527 | 527 | >>> ys = baseset([5, 2, 4]) |
|
528 | 528 | |
|
529 | 529 | >>> rs = addset(xs, ys) |
|
530 | 530 | >>> bool(rs), 0 in rs, 1 in rs, 5 in rs, rs.first(), rs.last() |
|
531 | 531 | (True, True, False, True, 0, 4) |
|
532 | 532 | >>> rs = addset(xs, baseset([])) |
|
533 | 533 | >>> bool(rs), 0 in rs, 1 in rs, rs.first(), rs.last() |
|
534 | 534 | (True, True, False, 0, 2) |
|
535 | 535 | >>> rs = addset(baseset([]), baseset([])) |
|
536 | 536 | >>> bool(rs), 0 in rs, rs.first(), rs.last() |
|
537 | 537 | (False, False, None, None) |
|
538 | 538 | |
|
539 | 539 | iterate unsorted: |
|
540 | 540 | >>> rs = addset(xs, ys) |
|
541 | 541 | >>> # (use generator because pypy could call len()) |
|
542 | 542 | >>> list(x for x in rs) # without _genlist |
|
543 | 543 | [0, 3, 2, 5, 4] |
|
544 | 544 | >>> assert not rs._genlist |
|
545 | 545 | >>> len(rs) |
|
546 | 546 | 5 |
|
547 | 547 | >>> [x for x in rs] # with _genlist |
|
548 | 548 | [0, 3, 2, 5, 4] |
|
549 | 549 | >>> assert rs._genlist |
|
550 | 550 | |
|
551 | 551 | iterate ascending: |
|
552 | 552 | >>> rs = addset(xs, ys, ascending=True) |
|
553 | 553 | >>> # (use generator because pypy could call len()) |
|
554 | 554 | >>> list(x for x in rs), list(x for x in rs.fastasc()) # without _asclist |
|
555 | 555 | ([0, 2, 3, 4, 5], [0, 2, 3, 4, 5]) |
|
556 | 556 | >>> assert not rs._asclist |
|
557 | 557 | >>> len(rs) |
|
558 | 558 | 5 |
|
559 | 559 | >>> [x for x in rs], [x for x in rs.fastasc()] |
|
560 | 560 | ([0, 2, 3, 4, 5], [0, 2, 3, 4, 5]) |
|
561 | 561 | >>> assert rs._asclist |
|
562 | 562 | |
|
563 | 563 | iterate descending: |
|
564 | 564 | >>> rs = addset(xs, ys, ascending=False) |
|
565 | 565 | >>> # (use generator because pypy could call len()) |
|
566 | 566 | >>> list(x for x in rs), list(x for x in rs.fastdesc()) # without _asclist |
|
567 | 567 | ([5, 4, 3, 2, 0], [5, 4, 3, 2, 0]) |
|
568 | 568 | >>> assert not rs._asclist |
|
569 | 569 | >>> len(rs) |
|
570 | 570 | 5 |
|
571 | 571 | >>> [x for x in rs], [x for x in rs.fastdesc()] |
|
572 | 572 | ([5, 4, 3, 2, 0], [5, 4, 3, 2, 0]) |
|
573 | 573 | >>> assert rs._asclist |
|
574 | 574 | |
|
575 | 575 | iterate ascending without fastasc: |
|
576 | 576 | >>> rs = addset(xs, generatorset(ys), ascending=True) |
|
577 | 577 | >>> assert rs.fastasc is None |
|
578 | 578 | >>> [x for x in rs] |
|
579 | 579 | [0, 2, 3, 4, 5] |
|
580 | 580 | |
|
581 | 581 | iterate descending without fastdesc: |
|
582 | 582 | >>> rs = addset(generatorset(xs), ys, ascending=False) |
|
583 | 583 | >>> assert rs.fastdesc is None |
|
584 | 584 | >>> [x for x in rs] |
|
585 | 585 | [5, 4, 3, 2, 0] |
|
586 | 586 | """ |
|
587 | 587 | def __init__(self, revs1, revs2, ascending=None): |
|
588 | 588 | self._r1 = revs1 |
|
589 | 589 | self._r2 = revs2 |
|
590 | 590 | self._iter = None |
|
591 | 591 | self._ascending = ascending |
|
592 | 592 | self._genlist = None |
|
593 | 593 | self._asclist = None |
|
594 | 594 | |
|
595 | 595 | def __len__(self): |
|
596 | 596 | return len(self._list) |
|
597 | 597 | |
|
598 | 598 | def __nonzero__(self): |
|
599 | 599 | return bool(self._r1) or bool(self._r2) |
|
600 | 600 | |
|
601 | 601 | __bool__ = __nonzero__ |
|
602 | 602 | |
|
603 | 603 | @util.propertycache |
|
604 | 604 | def _list(self): |
|
605 | 605 | if not self._genlist: |
|
606 | 606 | self._genlist = baseset(iter(self)) |
|
607 | 607 | return self._genlist |
|
608 | 608 | |
|
609 | 609 | def __iter__(self): |
|
610 | 610 | """Iterate over both collections without repeating elements |
|
611 | 611 | |
|
612 | 612 | If the ascending attribute is not set, iterate over the first one and |
|
613 | 613 | then over the second one checking for membership on the first one so we |
|
614 | 614 | don't yield any duplicates.
|
615 | 615 | |
|
616 | 616 | If the ascending attribute is set, iterate over both collections at the |
|
617 | 617 | same time, yielding only one value at a time in the given order. |
|
618 | 618 | """ |
|
619 | 619 | if self._ascending is None: |
|
620 | 620 | if self._genlist: |
|
621 | 621 | return iter(self._genlist) |
|
622 | 622 | def arbitraryordergen(): |
|
623 | 623 | for r in self._r1: |
|
624 | 624 | yield r |
|
625 | 625 | inr1 = self._r1.__contains__ |
|
626 | 626 | for r in self._r2: |
|
627 | 627 | if not inr1(r): |
|
628 | 628 | yield r |
|
629 | 629 | return arbitraryordergen() |
|
630 | 630 | # try to use our own fast iterator if it exists |
|
631 | 631 | self._trysetasclist() |
|
632 | 632 | if self._ascending: |
|
633 | 633 | attr = 'fastasc' |
|
634 | 634 | else: |
|
635 | 635 | attr = 'fastdesc' |
|
636 | 636 | it = getattr(self, attr) |
|
637 | 637 | if it is not None: |
|
638 | 638 | return it() |
|
639 | 639 | # maybe only one of the two components supports a fast iterator
|
640 | 640 | # get iterator for _r1 |
|
641 | 641 | iter1 = getattr(self._r1, attr) |
|
642 | 642 | if iter1 is None: |
|
643 | 643 | # let's avoid side effects (not sure it matters)
|
644 | 644 | iter1 = iter(sorted(self._r1, reverse=not self._ascending)) |
|
645 | 645 | else: |
|
646 | 646 | iter1 = iter1() |
|
647 | 647 | # get iterator for _r2 |
|
648 | 648 | iter2 = getattr(self._r2, attr) |
|
649 | 649 | if iter2 is None: |
|
650 | 650 | # let's avoid side effects (not sure it matters)
|
651 | 651 | iter2 = iter(sorted(self._r2, reverse=not self._ascending)) |
|
652 | 652 | else: |
|
653 | 653 | iter2 = iter2() |
|
654 | 654 | return _iterordered(self._ascending, iter1, iter2) |
|
655 | 655 | |
|
656 | 656 | def _trysetasclist(self): |
|
657 | 657 | """populate the _asclist attribute if possible and necessary""" |
|
658 | 658 | if self._genlist is not None and self._asclist is None: |
|
659 | 659 | self._asclist = sorted(self._genlist) |
|
660 | 660 | |
|
661 | 661 | @property |
|
662 | 662 | def fastasc(self): |
|
663 | 663 | self._trysetasclist() |
|
664 | 664 | if self._asclist is not None: |
|
665 | 665 | return self._asclist.__iter__ |
|
666 | 666 | iter1 = self._r1.fastasc |
|
667 | 667 | iter2 = self._r2.fastasc |
|
668 | 668 | if None in (iter1, iter2): |
|
669 | 669 | return None |
|
670 | 670 | return lambda: _iterordered(True, iter1(), iter2()) |
|
671 | 671 | |
|
672 | 672 | @property |
|
673 | 673 | def fastdesc(self): |
|
674 | 674 | self._trysetasclist() |
|
675 | 675 | if self._asclist is not None: |
|
676 | 676 | return self._asclist.__reversed__ |
|
677 | 677 | iter1 = self._r1.fastdesc |
|
678 | 678 | iter2 = self._r2.fastdesc |
|
679 | 679 | if None in (iter1, iter2): |
|
680 | 680 | return None |
|
681 | 681 | return lambda: _iterordered(False, iter1(), iter2()) |
|
682 | 682 | |
|
683 | 683 | def __contains__(self, x): |
|
684 | 684 | return x in self._r1 or x in self._r2 |
|
685 | 685 | |
|
686 | 686 | def sort(self, reverse=False): |
|
687 | 687 | """Sort the added set |
|
688 | 688 | |
|
689 | 689 | For this we use the cached list with all the generated values and if we |
|
690 | 690 | know they are ascending or descending we can sort them in a smart way. |
|
691 | 691 | """ |
|
692 | 692 | self._ascending = not reverse |
|
693 | 693 | |
|
694 | 694 | def isascending(self): |
|
695 | 695 | return self._ascending is not None and self._ascending |
|
696 | 696 | |
|
697 | 697 | def isdescending(self): |
|
698 | 698 | return self._ascending is not None and not self._ascending |
|
699 | 699 | |
|
700 | 700 | def istopo(self): |
|
701 | 701 | # not worth the trouble asserting if the two sets combined are still

702 | 702 | # in topological order. Use the sort() method to explicitly sort

703 | 703 | # again instead.
|
704 | 704 | return False |
|
705 | 705 | |
|
706 | 706 | def reverse(self): |
|
707 | 707 | if self._ascending is None: |
|
708 | 708 | self._list.reverse() |
|
709 | 709 | else: |
|
710 | 710 | self._ascending = not self._ascending |
|
711 | 711 | |
|
712 | 712 | def first(self): |
|
713 | 713 | for x in self: |
|
714 | 714 | return x |
|
715 | 715 | return None |
|
716 | 716 | |
|
717 | 717 | def last(self): |
|
718 | 718 | self.reverse() |
|
719 | 719 | val = self.first() |
|
720 | 720 | self.reverse() |
|
721 | 721 | return val |
|
722 | 722 | |
|
723 | 723 | def __repr__(self): |
|
724 | 724 | d = {None: '', False: '-', True: '+'}[self._ascending] |
|
725 | 725 | return '<%s%s %r, %r>' % (type(self).__name__, d, self._r1, self._r2) |
|
726 | 726 | |
|
727 | 727 | class generatorset(abstractsmartset): |
|
728 | 728 | """Wrap a generator for lazy iteration |
|
729 | 729 | |
|
730 | 730 | Wrapper structure for generators that provides lazy membership and can |
|
731 | 731 | be iterated more than once. |
|
732 | 732 | When asked for membership it generates values until either it finds the |
|
733 | 733 | requested one or has gone through all the elements in the generator |
|
734 | 734 | """ |
|
735 | 735 | def __init__(self, gen, iterasc=None): |
|
736 | 736 | """ |
|
737 | 737 | gen: a generator producing the values for the generatorset. |
|
738 | 738 | """ |
|
739 | 739 | self._gen = gen |
|
740 | 740 | self._asclist = None |
|
741 | 741 | self._cache = {} |
|
742 | 742 | self._genlist = [] |
|
743 | 743 | self._finished = False |
|
744 | 744 | self._ascending = True |
|
745 | 745 | if iterasc is not None: |
|
746 | 746 | if iterasc: |
|
747 | 747 | self.fastasc = self._iterator |
|
748 | 748 | self.__contains__ = self._asccontains |
|
749 | 749 | else: |
|
750 | 750 | self.fastdesc = self._iterator |
|
751 | 751 | self.__contains__ = self._desccontains |
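
A minimal sketch (hypothetical, and simplified: it drops the ordered
_genlist and the fastasc/fastdesc wiring above) of the lazy-membership core
of generatorset: consume the wrapped generator only as far as needed, and
cache every verdict so repeated queries never re-consume anything:

    class lazygen(object):
        def __init__(self, gen):
            self._gen = gen
            self._cache = {}        # value -> membership verdict

        def __contains__(self, x):
            if x in self._cache:
                return self._cache[x]
            for v in self._gen:     # consume new values only
                self._cache[v] = True
                if v == x:
                    return True
            self._cache[x] = False  # generator exhausted: cache the miss
            return False

    s = lazygen(iter([3, 1, 4, 1, 5]))
    print(4 in s)   # True: stops consuming as soon as 4 appears
    print(2 in s)   # False: exhausts the generator, then caches the miss
    print(4 in s)   # True: answered from the cache this time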
|
752 | 752 | |
|
753 | 753 | def __nonzero__(self): |
|
754 | 754 | # Do not use 'for r in self' because it will enforce the iteration |
|
755 | 755 | # order (default ascending), possibly unrolling a whole descending |
|
756 | 756 | # iterator. |
|
757 | 757 | if self._genlist: |
|
758 | 758 | return True |
|
759 | 759 | for r in self._consumegen(): |
|
760 | 760 | return True |
|
761 | 761 | return False |
|
762 | 762 | |
|
763 | 763 | __bool__ = __nonzero__ |
|
764 | 764 | |
|
765 | 765 | def __contains__(self, x): |
|
766 | 766 | if x in self._cache: |
|
767 | 767 | return self._cache[x] |
|
768 | 768 | |
|
769 | 769 | # Use new values only, as existing values would be cached. |
|
770 | 770 | for l in self._consumegen(): |
|
771 | 771 | if l == x: |
|
772 | 772 | return True |
|
773 | 773 | |
|
774 | 774 | self._cache[x] = False |
|
775 | 775 | return False |
|
776 | 776 | |
|
777 | 777 | def _asccontains(self, x): |
|
778 | 778 | """version of contains optimised for ascending generator""" |
|
779 | 779 | if x in self._cache: |
|
780 | 780 | return self._cache[x] |
|
781 | 781 | |
|
782 | 782 | # Use new values only, as existing values would be cached. |
|
783 | 783 | for l in self._consumegen(): |
|
784 | 784 | if l == x: |
|
785 | 785 | return True |
|
786 | 786 | if l > x: |
|
787 | 787 | break |
|
788 | 788 | |
|
789 | 789 | self._cache[x] = False |
|
790 | 790 | return False |
|
791 | 791 | |
|
792 | 792 | def _desccontains(self, x): |
|
793 | 793 | """version of contains optimised for descending generator""" |
|
794 | 794 | if x in self._cache: |
|
795 | 795 | return self._cache[x] |
|
796 | 796 | |
|
797 | 797 | # Use new values only, as existing values would be cached. |
|
798 | 798 | for l in self._consumegen(): |
|
799 | 799 | if l == x: |
|
800 | 800 | return True |
|
801 | 801 | if l < x: |
|
802 | 802 | break |
|
803 | 803 | |
|
804 | 804 | self._cache[x] = False |
|
805 | 805 | return False |
|
806 | 806 | |
|
807 | 807 | def __iter__(self): |
|
808 | 808 | if self._ascending: |
|
809 | 809 | it = self.fastasc |
|
810 | 810 | else: |
|
811 | 811 | it = self.fastdesc |
|
812 | 812 | if it is not None: |
|
813 | 813 | return it() |
|
814 | 814 | # we need to consume the iterator |
|
815 | 815 | for x in self._consumegen(): |
|
816 | 816 | pass |
|
817 | 817 | # re-run the same code path, now that fastasc/fastdesc exist
|
818 | 818 | return iter(self) |
|
819 | 819 | |
|
820 | 820 | def _iterator(self): |
|
821 | 821 | if self._finished: |
|
822 | 822 | return iter(self._genlist) |
|
823 | 823 | |
|
824 | 824 | # We have to use this complex iteration strategy to allow multiple |
|
825 | 825 | # iterations at the same time. We need to be able to catch revisions

826 | 826 | # consumed from _consumegen and appended to genlist by another instance.

827 | 827 | #

828 | 828 | # Getting rid of it would provide about a 15% speedup on this

829 | 829 | # iteration.
|
830 | 830 | genlist = self._genlist |
|
831 | 831 | nextgen = self._consumegen() |
|
832 | 832 | _len, _next = len, next # cache global lookup |
|
833 | 833 | def gen(): |
|
834 | 834 | i = 0 |
|
835 | 835 | while True: |
|
836 | 836 | if i < _len(genlist): |
|
837 | 837 | yield genlist[i] |
|
838 | 838 | else: |
|
839 | 839 | yield _next(nextgen) |
|
840 | 840 | i += 1 |
|
841 | 841 | return gen() |
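
A self-contained sketch of the replay strategy described in the comment
above (hypothetical helper; it folds _consumegen's caching into the replay
loop and guards StopIteration for PEP 479 compatibility): each iterator
first replays the shared cache list, then pulls fresh values from the
single underlying generator, so interleaved iterations stay consistent:

    def replaying(cache, source):
        i = 0
        while True:
            if i < len(cache):
                yield cache[i]         # replay what another iterator pulled
            else:
                try:
                    value = next(source)
                except StopIteration:  # don't leak StopIteration (PEP 479)
                    return
                cache.append(value)    # publish for the other iterators
                yield value
            i += 1

    cache, src = [], iter([10, 20, 30])
    a, b = replaying(cache, src), replaying(cache, src)
    print(next(a), next(a))  # 10 20 -- pulled fresh from the generator
    print(next(b), next(b))  # 10 20 -- replayed from the shared cache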
|
842 | 842 | |
|
843 | 843 | def _consumegen(self): |
|
844 | 844 | cache = self._cache |
|
845 | 845 | genlist = self._genlist.append |
|
846 | 846 | for item in self._gen: |
|
847 | 847 | cache[item] = True |
|
848 | 848 | genlist(item) |
|
849 | 849 | yield item |
|
850 | 850 | if not self._finished: |
|
851 | 851 | self._finished = True |
|
852 | 852 | asc = self._genlist[:] |
|
853 | 853 | asc.sort() |
|
854 | 854 | self._asclist = asc |
|
855 | 855 | self.fastasc = asc.__iter__ |
|
856 | 856 | self.fastdesc = asc.__reversed__ |
|
857 | 857 | |
|
858 | 858 | def __len__(self): |
|
859 | 859 | for x in self._consumegen(): |
|
860 | 860 | pass |
|
861 | 861 | return len(self._genlist) |
|
862 | 862 | |
|
863 | 863 | def sort(self, reverse=False): |
|
864 | 864 | self._ascending = not reverse |
|
865 | 865 | |
|
866 | 866 | def reverse(self): |
|
867 | 867 | self._ascending = not self._ascending |
|
868 | 868 | |
|
869 | 869 | def isascending(self): |
|
870 | 870 | return self._ascending |
|
871 | 871 | |
|
872 | 872 | def isdescending(self): |
|
873 | 873 | return not self._ascending |
|
874 | 874 | |
|
875 | 875 | def istopo(self): |
|
876 | 876 | # not worth the trouble asserting if the generated values are still

877 | 877 | # in topological order. Use the sort() method to explicitly sort

878 | 878 | # again instead.
|
879 | 879 | return False |
|
880 | 880 | |
|
881 | 881 | def first(self): |
|
882 | 882 | if self._ascending: |
|
883 | 883 | it = self.fastasc |
|
884 | 884 | else: |
|
885 | 885 | it = self.fastdesc |
|
886 | 886 | if it is None: |
|
887 | 887 | # we need to consume all and try again |
|
888 | 888 | for x in self._consumegen(): |
|
889 | 889 | pass |
|
890 | 890 | return self.first() |
|
891 | 891 | return next(it(), None) |
|
892 | 892 | |
|
893 | 893 | def last(self): |
|
894 | 894 | if self._ascending: |
|
895 | 895 | it = self.fastdesc |
|
896 | 896 | else: |
|
897 | 897 | it = self.fastasc |
|
898 | 898 | if it is None: |
|
899 | 899 | # we need to consume all and try again |
|
900 | 900 | for x in self._consumegen(): |
|
901 | 901 | pass |
|
902 | 902 | return self.last()
|
903 | 903 | return next(it(), None) |
|
904 | 904 | |
|
905 | 905 | def __repr__(self): |
|
906 | 906 | d = {False: '-', True: '+'}[self._ascending] |
|
907 | 907 | return '<%s%s>' % (type(self).__name__, d) |
|
908 | 908 | |
|
909 | 909 | class spanset(abstractsmartset): |
|
910 | 910 | """Duck type for baseset class which represents a range of revisions and |
|
911 | 911 | can work lazily and without having all the range in memory |
|
912 | 912 | |
|
913 | 913 | Note that spanset(x, y) behaves almost like xrange(x, y) except for two

914 | 914 | notable points:

915 | 915 | - when x > y it will be automatically descending,

916 | 916 | - revisions filtered by this repoview will be skipped.
|
917 | 917 | |
|
918 | 918 | """ |
|
919 | 919 | def __init__(self, repo, start=0, end=None): |
|
920 | 920 | """ |
|
921 | 921 | start: first revision included in the set

922 | 922 | (defaults to 0)

923 | 923 | end: first revision excluded (last + 1)

924 | 924 | (defaults to len(repo))
|
925 | 925 | |
|
926 | 926 | Spanset will be descending if `end` < `start`. |
|
927 | 927 | """ |
|
928 | 928 | if end is None: |
|
929 | 929 | end = len(repo) |
|
930 | 930 | self._ascending = start <= end |
|
931 | 931 | if not self._ascending: |
|
932 | 932 | start, end = end + 1, start + 1
|
933 | 933 | self._start = start |
|
934 | 934 | self._end = end |
|
935 | 935 | self._hiddenrevs = repo.changelog.filteredrevs |
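
The normalization in __init__ above is the whole ordering trick: bounds are
stored ascending and the direction is tracked separately, which is why
spanset(5, 2) iterates 5, 4, 3 where xrange(5, 2) would be empty. A sketch
of just that rule (hypothetical helper, without hidden-revision filtering):

    def span(start, end):
        ascending = start <= end
        if not ascending:
            start, end = end + 1, start + 1  # normalize to ascending bounds
        rng = range(start, end)
        return list(rng) if ascending else list(reversed(rng))

    print(span(2, 5))  # [2, 3, 4]
    print(span(5, 2))  # [5, 4, 3]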
|
936 | 936 | |
|
937 | 937 | def sort(self, reverse=False): |
|
938 | 938 | self._ascending = not reverse |
|
939 | 939 | |
|
940 | 940 | def reverse(self): |
|
941 | 941 | self._ascending = not self._ascending |
|
942 | 942 | |
|
943 | 943 | def istopo(self): |
|
944 | 944 | # not worth the trouble asserting if the span is still

945 | 945 | # in topological order. Use the sort() method to explicitly sort

946 | 946 | # again instead.
|
947 | 947 | return False |
|
948 | 948 | |
|
949 | 949 | def _iterfilter(self, iterrange): |
|
950 | 950 | s = self._hiddenrevs |
|
951 | 951 | for r in iterrange: |
|
952 | 952 | if r not in s: |
|
953 | 953 | yield r |
|
954 | 954 | |
|
955 | 955 | def __iter__(self): |
|
956 | 956 | if self._ascending: |
|
957 | 957 | return self.fastasc() |
|
958 | 958 | else: |
|
959 | 959 | return self.fastdesc() |
|
960 | 960 | |
|
961 | 961 | def fastasc(self): |
|
962 | 962 | iterrange = xrange(self._start, self._end) |
|
963 | 963 | if self._hiddenrevs: |
|
964 | 964 | return self._iterfilter(iterrange) |
|
965 | 965 | return iter(iterrange) |
|
966 | 966 | |
|
967 | 967 | def fastdesc(self): |
|
968 | 968 | iterrange = xrange(self._end - 1, self._start - 1, -1) |
|
969 | 969 | if self._hiddenrevs: |
|
970 | 970 | return self._iterfilter(iterrange) |
|
971 | 971 | return iter(iterrange) |
|
972 | 972 | |
|
973 | 973 | def __contains__(self, rev): |
|
974 | 974 | hidden = self._hiddenrevs |
|
975 | 975 | return ((self._start <= rev < self._end) |
|
976 | 976 | and not (hidden and rev in hidden)) |
|
977 | 977 | |
|
978 | 978 | def __nonzero__(self): |
|
979 | 979 | for r in self: |
|
980 | 980 | return True |
|
981 | 981 | return False |
|
982 | 982 | |
|
983 | 983 | __bool__ = __nonzero__ |
|
984 | 984 | |
|
985 | 985 | def __len__(self): |
|
986 | 986 | if not self._hiddenrevs: |
|
987 | 987 | return abs(self._end - self._start) |
|
988 | 988 | else: |
|
989 | 989 | count = 0 |
|
990 | 990 | start = self._start |
|
991 | 991 | end = self._end |
|
992 | 992 | for rev in self._hiddenrevs: |
|
993 | 993 | if (end < rev <= start) or (start <= rev < end): |
|
994 | 994 | count += 1 |
|
995 | 995 | return abs(self._end - self._start) - count |
|
996 | 996 | |
|
997 | 997 | def isascending(self): |
|
998 | 998 | return self._ascending |
|
999 | 999 | |
|
1000 | 1000 | def isdescending(self): |
|
1001 | 1001 | return not self._ascending |
|
1002 | 1002 | |
|
1003 | 1003 | def first(self): |
|
1004 | 1004 | if self._ascending: |
|
1005 | 1005 | it = self.fastasc |
|
1006 | 1006 | else: |
|
1007 | 1007 | it = self.fastdesc |
|
1008 | 1008 | for x in it(): |
|
1009 | 1009 | return x |
|
1010 | 1010 | return None |
|
1011 | 1011 | |
|
1012 | 1012 | def last(self): |
|
1013 | 1013 | if self._ascending: |
|
1014 | 1014 | it = self.fastdesc |
|
1015 | 1015 | else: |
|
1016 | 1016 | it = self.fastasc |
|
1017 | 1017 | for x in it(): |
|
1018 | 1018 | return x |
|
1019 | 1019 | return None |
|
1020 | 1020 | |
|
1021 | 1021 | def __repr__(self): |
|
1022 | 1022 | d = {False: '-', True: '+'}[self._ascending] |
|
1023 | 1023 | return '<%s%s %d:%d>' % (type(self).__name__, d, |
|
1024 | 1024 | self._start, self._end - 1) |
|
1025 | 1025 | |
|
1026 | 1026 | class fullreposet(spanset): |
|
1027 | 1027 | """a set containing all revisions in the repo |
|
1028 | 1028 | |
|
1029 | 1029 | This class exists to host special optimization and magic to handle virtual |
|
1030 | 1030 | revisions such as "null". |
|
1031 | 1031 | """ |
|
1032 | 1032 | |
|
1033 | 1033 | def __init__(self, repo): |
|
1034 | 1034 | super(fullreposet, self).__init__(repo) |
|
1035 | 1035 | |
|
1036 | 1036 | def __and__(self, other): |
|
1037 | 1037 | """As self contains the whole repo, all of the other set should also be |
|
1038 | 1038 | in self. Therefore `self & other = other`. |
|
1039 | 1039 | |
|
1040 | 1040 | This boldly assumes the other contains valid revs only. |
|
1041 | 1041 | """ |
|
1042 | 1042 | # other is not a smartset, make it so
|
1043 | 1043 | if not util.safehasattr(other, 'isascending'): |
|
1044 | 1044 | # filter out hidden revisions

1045 | 1045 | # (this boldly assumes all smartsets are pure)
|
1046 | 1046 | # |
|
1047 | 1047 | # `other` was used with "&", let's assume this is a set like |
|
1048 | 1048 | # object. |
|
1049 | 1049 | other = baseset(other - self._hiddenrevs) |
|
1050 | 1050 | |
|
1051 | 1051 | other.sort(reverse=self.isdescending()) |
|
1052 | 1052 | return other |
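
The identity this method relies on is ordinary intersection with the
universe, plus an ordering fix-up; with plain Python sets for illustration:

    universe = set(range(100))        # stands in for "all revisions"
    other = {30, 10, 20}
    assert universe & other == other  # self & other == other
    # fullreposet then only reconciles the ordering, roughly:
    print(sorted(other))              # [10, 20, 30]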
|
1053 | 1053 | |
|
1054 | 1054 | def prettyformat(revs): |
|
1055 | 1055 | lines = [] |
|
1056 | 1056 | rs = repr(revs) |
|
1057 | 1057 | p = 0 |
|
1058 | 1058 | while p < len(rs): |
|
1059 | 1059 | q = rs.find('<', p + 1) |
|
1060 | 1060 | if q < 0: |
|
1061 | 1061 | q = len(rs) |
|
1062 | 1062 | l = rs.count('<', 0, p) - rs.count('>', 0, p) |
|
1063 | 1063 | assert l >= 0 |
|
1064 | 1064 | lines.append((l, rs[p:q].rstrip())) |
|
1065 | 1065 | p = q |
|
1066 | 1066 | return '\n'.join(' ' * l + s for l, s in lines) |
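
A hedged illustration (derived from the __repr__ formats above) of what
prettyformat produces: the nested repr is split at each '<' and indented
one space per nesting level:

    >>> rs = addset(baseset([0, 3, 2]), baseset([5, 2, 4]))
    >>> print(prettyformat(rs))
    <addset
     <baseset [0, 3, 2]>,
     <baseset [5, 2, 4]>>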