@@ -1,473 +1,472 @@
|
1 | 1 | # rebase.py - rebasing feature for mercurial |
|
2 | 2 | # |
|
3 | 3 | # Copyright 2008 Stefano Tortarolo <stefano.tortarolo at gmail dot com> |
|
4 | 4 | # |
|
5 | 5 | # This software may be used and distributed according to the terms |
|
6 | 6 | # of the GNU General Public License, incorporated herein by reference. |
|
7 | 7 | |
|
8 | 8 | '''move sets of revisions to a different ancestor |
|
9 | 9 | |
|
10 | 10 | This extension lets you rebase changesets in an existing Mercurial |
|
11 | 11 | repository. |
|
12 | 12 | |
|
13 | 13 | For more information: |
|
14 | 14 | http://www.selenic.com/mercurial/wiki/index.cgi/RebaseProject |
|
15 | 15 | ''' |
|
16 | 16 | |
|
17 | 17 | from mercurial import util, repair, merge, cmdutil, commands, error |
|
18 | 18 | from mercurial import extensions, ancestor, copies, patch |
|
19 | 19 | from mercurial.commands import templateopts |
|
20 | 20 | from mercurial.node import nullrev |
|
21 | 21 | from mercurial.lock import release |
|
22 | 22 | from mercurial.i18n import _ |
|
23 | 23 | import os, errno |
|
24 | 24 | |
|
25 | 25 | def rebasemerge(repo, rev, first=False): |
|
26 | 26 | 'return the correct ancestor' |
|
27 | 27 | oldancestor = ancestor.ancestor |
|
28 | 28 | |
|
29 | 29 | def newancestor(a, b, pfunc): |
|
30 | 30 | ancestor.ancestor = oldancestor |
|
31 | 31 | if b == rev: |
|
32 | 32 | return repo[rev].parents()[0].rev() |
|
33 | 33 | return ancestor.ancestor(a, b, pfunc) |
|
34 | 34 | |
|
35 | 35 | if not first: |
|
36 | 36 | ancestor.ancestor = newancestor |
|
37 | 37 | else: |
|
38 | 38 | repo.ui.debug(_("first revision, do not change ancestor\n")) |
|
39 | 39 | stats = merge.update(repo, rev, True, True, False) |
|
40 | 40 | return stats |
|
41 | 41 | |
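
Note: rebasemerge installs a one-shot replacement for ancestor.ancestor; newancestor restores the original before delegating, so only the first ancestor computation of the merge is overridden. A minimal standalone sketch of that swap-and-restore pattern (math.sqrt is a stand-in for ancestor.ancestor):

    import math

    _orig = math.sqrt

    def _patched(x):
        math.sqrt = _orig      # restore immediately, as newancestor does
        return _orig(x)

    math.sqrt = _patched
    print math.sqrt(4.0)       # routed through the patch once
    print math.sqrt(9.0)       # original function again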
|
42 | 42 | def rebase(ui, repo, **opts): |
|
43 | 43 | """move changeset (and descendants) to a different branch |
|
44 | 44 | |
|
45 | 45 | Rebase uses repeated merging to graft changesets from one part of |
|
46 | 46 | history onto another. This can be useful for linearizing local |
|
47 | 47 | changes relative to a master development tree. |
|
48 | 48 | |
|
49 | 49 | If a rebase is interrupted to manually resolve a merge, it can be |
|
50 | 50 | continued with --continue/-c or aborted with --abort/-a. |
|
51 | 51 | """ |
|
52 | 52 | originalwd = target = None |
|
53 | 53 | external = nullrev |
|
54 | 54 | state = skipped = {} |
|
55 | 55 | |
|
56 | 56 | lock = wlock = None |
|
57 | 57 | try: |
|
58 | 58 | lock = repo.lock() |
|
59 | 59 | wlock = repo.wlock() |
|
60 | 60 | |
|
61 | 61 | # Validate input and define rebasing points |
|
62 | 62 | destf = opts.get('dest', None) |
|
63 | 63 | srcf = opts.get('source', None) |
|
64 | 64 | basef = opts.get('base', None) |
|
65 | 65 | contf = opts.get('continue') |
|
66 | 66 | abortf = opts.get('abort') |
|
67 | 67 | collapsef = opts.get('collapse', False) |
|
68 | 68 | extrafn = opts.get('extrafn') |
|
69 | 69 | keepf = opts.get('keep', False) |
|
70 | 70 | keepbranchesf = opts.get('keepbranches', False) |
|
71 | 71 | |
|
72 | 72 | if contf or abortf: |
|
73 | 73 | if contf and abortf: |
|
74 | 74 | raise error.ParseError('rebase', |
|
75 | 75 | _('cannot use both abort and continue')) |
|
76 | 76 | if collapsef: |
|
77 | 77 | raise error.ParseError( |
|
78 | 78 | 'rebase', _('cannot use collapse with continue or abort')) |
|
79 | 79 | |
|
80 | 80 | if srcf or basef or destf: |
|
81 | 81 | raise error.ParseError('rebase', |
|
82 | 82 | _('abort and continue do not allow specifying revisions')) |
|
83 | 83 | |
|
84 | 84 | (originalwd, target, state, collapsef, keepf, |
|
85 | 85 | keepbranchesf, external) = restorestatus(repo) |
|
86 | 86 | if abortf: |
|
87 | 87 | abort(repo, originalwd, target, state) |
|
88 | 88 | return |
|
89 | 89 | else: |
|
90 | 90 | if srcf and basef: |
|
91 | 91 | raise error.ParseError('rebase', _('cannot specify both a ' |
|
92 | 92 | 'revision and a base')) |
|
93 | 93 | cmdutil.bail_if_changed(repo) |
|
94 | 94 | result = buildstate(repo, destf, srcf, basef, collapsef) |
|
95 | 95 | if result: |
|
96 | 96 | originalwd, target, state, external = result |
|
97 | 97 | else: # Empty state built, nothing to rebase |
|
98 | 98 | repo.ui.status(_('nothing to rebase\n')) |
|
99 | 99 | return |
|
100 | 100 | |
|
101 | 101 | if keepbranchesf: |
|
102 | 102 | if extrafn: |
|
103 | 103 | raise error.ParseError( |
|
104 | 104 | 'rebase', _('cannot use both keepbranches and extrafn')) |
|
105 | 105 | def extrafn(ctx, extra): |
|
106 | 106 | extra['branch'] = ctx.branch() |
|
107 | 107 | |
|
108 | 108 | # Rebase |
|
109 | 109 | targetancestors = list(repo.changelog.ancestors(target)) |
|
110 | 110 | targetancestors.append(target) |
|
111 | 111 | |
|
112 | 112 | for rev in util.sort(state): |
|
113 | 113 | if state[rev] == -1: |
|
114 | 114 | storestatus(repo, originalwd, target, state, collapsef, keepf, |
|
115 | 115 | keepbranchesf, external) |
|
116 | 116 | rebasenode(repo, rev, target, state, skipped, targetancestors, |
|
117 | 117 | collapsef, extrafn) |
|
118 | 118 | ui.note(_('rebase merging completed\n')) |
|
119 | 119 | |
|
120 | 120 | if collapsef: |
|
121 | 121 | p1, p2 = defineparents(repo, min(state), target, |
|
122 | 122 | state, targetancestors) |
|
123 | 123 | concludenode(repo, rev, p1, external, state, collapsef, |
|
124 | 124 | last=True, skipped=skipped, extrafn=extrafn) |
|
125 | 125 | |
|
126 | 126 | if 'qtip' in repo.tags(): |
|
127 | 127 | updatemq(repo, state, skipped, **opts) |
|
128 | 128 | |
|
129 | 129 | if not keepf: |
|
130 | 130 | # Remove revisions that are no longer useful |
|
131 | | if (set(repo.changelog.descendants(min(state))) |

132 | | - set(state.keys())): |

| 131 | if set(repo.changelog.descendants(min(state))) - set(state): |
|
133 | 132 | ui.warn(_("warning: new changesets detected on source branch, " |
|
134 | 133 | "not stripping\n")) |
|
135 | 134 | else: |
|
136 | 135 | repair.strip(repo.ui, repo, repo[min(state)].node(), "strip") |
|
137 | 136 | |
|
138 | 137 | clearstatus(repo) |
|
139 | 138 | ui.status(_("rebase completed\n")) |
|
140 | 139 | if os.path.exists(repo.sjoin('undo')): |
|
141 | 140 | util.unlink(repo.sjoin('undo')) |
|
142 | 141 | if skipped: |
|
143 | 142 | ui.note(_("%d revisions have been skipped\n") % len(skipped)) |
|
144 | 143 | finally: |
|
145 | 144 | release(lock, wlock) |
|
146 | 145 | |
|
147 | 146 | def concludenode(repo, rev, p1, p2, state, collapse, last=False, skipped={}, |
|
148 | 147 | extrafn=None): |
|
149 | 148 | """Skip commit if collapsing has been required and rev is not the last |
|
150 | 149 | revision, commit otherwise |
|
151 | 150 | """ |
|
152 | 151 | repo.ui.debug(_(" set parents\n")) |
|
153 | 152 | if collapse and not last: |
|
154 | 153 | repo.dirstate.setparents(repo[p1].node()) |
|
155 | 154 | return None |
|
156 | 155 | |
|
157 | 156 | repo.dirstate.setparents(repo[p1].node(), repo[p2].node()) |
|
158 | 157 | |
|
159 | 158 | # Commit, record the old nodeid |
|
160 | 159 | m, a, r = repo.status()[:3] |
|
161 | 160 | newrev = nullrev |
|
162 | 161 | try: |
|
163 | 162 | if last: |
|
164 | 163 | commitmsg = 'Collapsed revision' |
|
165 | 164 | for rebased in state: |
|
166 | 165 | if rebased not in skipped: |
|
167 | 166 | commitmsg += '\n* %s' % repo[rebased].description() |
|
168 | 167 | commitmsg = repo.ui.edit(commitmsg, repo.ui.username()) |
|
169 | 168 | else: |
|
170 | 169 | commitmsg = repo[rev].description() |
|
171 | 170 | # Commit might fail if unresolved files exist |
|
172 | 171 | extra = {'rebase_source': repo[rev].hex()} |
|
173 | 172 | if extrafn: |
|
174 | 173 | extrafn(repo[rev], extra) |
|
175 | 174 | newrev = repo.commit(m+a+r, |
|
176 | 175 | text=commitmsg, |
|
177 | 176 | user=repo[rev].user(), |
|
178 | 177 | date=repo[rev].date(), |
|
179 | 178 | extra=extra) |
|
180 | 179 | return newrev |
|
181 | 180 | except util.Abort: |
|
182 | 181 | # Invalidate the previous setparents |
|
183 | 182 | repo.dirstate.invalidate() |
|
184 | 183 | raise |
|
185 | 184 | |
|
186 | 185 | def rebasenode(repo, rev, target, state, skipped, targetancestors, collapse, |
|
187 | 186 | extrafn): |
|
188 | 187 | 'Rebase a single revision' |
|
189 | 188 | repo.ui.debug(_("rebasing %d:%s\n") % (rev, repo[rev])) |
|
190 | 189 | |
|
191 | 190 | p1, p2 = defineparents(repo, rev, target, state, targetancestors) |
|
192 | 191 | |
|
193 | 192 | repo.ui.debug(_(" future parents are %d and %d\n") % (repo[p1].rev(), |
|
194 | 193 | repo[p2].rev())) |
|
195 | 194 | |
|
196 | 195 | # Merge phase |
|
197 | 196 | if len(repo.parents()) != 2: |
|
198 | 197 | # Update to target and merge it with local |
|
199 | 198 | if repo['.'].rev() != repo[p1].rev(): |
|
200 | 199 | repo.ui.debug(_(" update to %d:%s\n") % (repo[p1].rev(), repo[p1])) |
|
201 | 200 | merge.update(repo, p1, False, True, False) |
|
202 | 201 | else: |
|
203 | 202 | repo.ui.debug(_(" already in target\n")) |
|
204 | 203 | repo.dirstate.write() |
|
205 | 204 | repo.ui.debug(_(" merge against %d:%s\n") % (repo[rev].rev(), repo[rev])) |
|
206 | 205 | first = repo[rev].rev() == repo[min(state)].rev() |
|
207 | 206 | stats = rebasemerge(repo, rev, first) |
|
208 | 207 | |
|
209 | 208 | if stats[3] > 0: |
|
210 | 209 | raise util.Abort(_('fix unresolved conflicts with hg resolve then ' |
|
211 | 210 | 'run hg rebase --continue')) |
|
212 | 211 | else: # we have an interrupted rebase |
|
213 | 212 | repo.ui.debug(_('resuming interrupted rebase\n')) |
|
214 | 213 | |
|
215 | 214 | # Keep track of renamed files in the revision that is going to be rebased |
|
216 | 215 | # Here we simulate the copies and renames in the source changeset |
|
217 | 216 | cop, diver = copies.copies(repo, repo[rev], repo[target], repo[p2], True) |
|
218 | 217 | m1 = repo[rev].manifest() |
|
219 | 218 | m2 = repo[target].manifest() |
|
220 | 219 | for k, v in cop.iteritems(): |
|
221 | 220 | if k in m1: |
|
222 | 221 | if v in m1 or v in m2: |
|
223 | 222 | repo.dirstate.copy(v, k) |
|
224 | 223 | if v in m2 and v not in m1: |
|
225 | 224 | repo.dirstate.remove(v) |
|
226 | 225 | |
|
227 | 226 | newrev = concludenode(repo, rev, p1, p2, state, collapse, |
|
228 | 227 | extrafn=extrafn) |
|
229 | 228 | |
|
230 | 229 | # Update the state |
|
231 | 230 | if newrev is not None: |
|
232 | 231 | state[rev] = repo[newrev].rev() |
|
233 | 232 | else: |
|
234 | 233 | if not collapse: |
|
235 | 234 | repo.ui.note(_('no changes, revision %d skipped\n') % rev) |
|
236 | 235 | repo.ui.debug(_('next revision set to %s\n') % p1) |
|
237 | 236 | skipped[rev] = True |
|
238 | 237 | state[rev] = p1 |
|
239 | 238 | |
|
240 | 239 | def defineparents(repo, rev, target, state, targetancestors): |
|
241 | 240 | 'Return the new parent relationship of the revision that will be rebased' |
|
242 | 241 | parents = repo[rev].parents() |
|
243 | 242 | p1 = p2 = nullrev |
|
244 | 243 | |
|
245 | 244 | P1n = parents[0].rev() |
|
246 | 245 | if P1n in targetancestors: |
|
247 | 246 | p1 = target |
|
248 | 247 | elif P1n in state: |
|
249 | 248 | p1 = state[P1n] |
|
250 | 249 | else: # P1n external |
|
251 | 250 | p1 = target |
|
252 | 251 | p2 = P1n |
|
253 | 252 | |
|
254 | 253 | if len(parents) == 2 and parents[1].rev() not in targetancestors: |
|
255 | 254 | P2n = parents[1].rev() |
|
256 | 255 | # interesting second parent |
|
257 | 256 | if P2n in state: |
|
258 | 257 | if p1 == target: # P1n in targetancestors or external |
|
259 | 258 | p1 = state[P2n] |
|
260 | 259 | else: |
|
261 | 260 | p2 = state[P2n] |
|
262 | 261 | else: # P2n external |
|
263 | 262 | if p2 != nullrev: # P1n external too => rev is a merged revision |
|
264 | 263 | raise util.Abort(_('cannot use revision %d as base, result ' |
|
265 | 264 | 'would have 3 parents') % rev) |
|
266 | 265 | p2 = P2n |
|
267 | 266 | return p1, p2 |
|
268 | 267 | |
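
Note: a repo-free paraphrase of the first-parent branch of defineparents may make the mapping easier to follow; the revision numbers below are illustrative, with -1 standing in for nullrev:

    def defineparents_sketch(parents, target, state, targetancestors):
        p1 = p2 = -1                       # nullrev stand-in
        p1n = parents[0]
        if p1n in targetancestors:         # parent already behind the target
            p1 = target
        elif p1n in state:                 # parent was rebased; follow its copy
            p1 = state[p1n]
        else:                              # external parent: keep it as p2
            p1, p2 = target, p1n
        return p1, p2

    # rebasing a rev whose parent 2 was already rebased to 5, target is 4:
    print defineparents_sketch([2], 4, {2: 5}, set([0, 1]))   # -> (5, -1)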
|
269 | 268 | def isagitpatch(repo, patchname): |
|
270 | 269 | 'Return true if the given patch is in git format' |
|
271 | 270 | mqpatch = os.path.join(repo.mq.path, patchname) |
|
272 | 271 | for line in patch.linereader(file(mqpatch, 'rb')): |
|
273 | 272 | if line.startswith('diff --git'): |
|
274 | 273 | return True |
|
275 | 274 | return False |
|
276 | 275 | |
|
277 | 276 | def updatemq(repo, state, skipped, **opts): |
|
278 | 277 | 'Update rebased mq patches - finalize and then import them' |
|
279 | 278 | mqrebase = {} |
|
280 | 279 | for p in repo.mq.applied: |
|
281 | 280 | if repo[p.rev].rev() in state: |
|
282 | 281 | repo.ui.debug(_('revision %d is an mq patch (%s), finalize it.\n') % |
|
283 | 282 | (repo[p.rev].rev(), p.name)) |
|
284 | 283 | mqrebase[repo[p.rev].rev()] = (p.name, isagitpatch(repo, p.name)) |
|
285 | 284 | |
|
286 | 285 | if mqrebase: |
|
287 | 286 | repo.mq.finish(repo, mqrebase.keys()) |
|
288 | 287 | |
|
289 | 288 | # We must start import from the newest revision |
|
290 | 289 | mq = mqrebase.keys() |
|
291 | 290 | mq.sort() |
|
292 | 291 | mq.reverse() |
|
293 | 292 | for rev in mq: |
|
294 | 293 | if rev not in skipped: |
|
295 | 294 | repo.ui.debug(_('import mq patch %d (%s)\n') |
|
296 | 295 | % (state[rev], mqrebase[rev][0])) |
|
297 | 296 | repo.mq.qimport(repo, (), patchname=mqrebase[rev][0], |
|
298 | 297 | git=mqrebase[rev][1],rev=[str(state[rev])]) |
|
299 | 298 | repo.mq.save_dirty() |
|
300 | 299 | |
|
301 | 300 | def storestatus(repo, originalwd, target, state, collapse, keep, keepbranches, |
|
302 | 301 | external): |
|
303 | 302 | 'Store the current status to allow recovery' |
|
304 | 303 | f = repo.opener("rebasestate", "w") |
|
305 | 304 | f.write(repo[originalwd].hex() + '\n') |
|
306 | 305 | f.write(repo[target].hex() + '\n') |
|
307 | 306 | f.write(repo[external].hex() + '\n') |
|
308 | 307 | f.write('%d\n' % int(collapse)) |
|
309 | 308 | f.write('%d\n' % int(keep)) |
|
310 | 309 | f.write('%d\n' % int(keepbranches)) |
|
311 | 310 | for d, v in state.iteritems(): |
|
312 | 311 | oldrev = repo[d].hex() |
|
313 | 312 | newrev = repo[v].hex() |
|
314 | 313 | f.write("%s:%s\n" % (oldrev, newrev)) |
|
315 | 314 | f.close() |
|
316 | 315 | repo.ui.debug(_('rebase status stored\n')) |
|
317 | 316 | |
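
Note: the .hg/rebasestate file written above is a plain line-oriented format: three hex node lines (originalwd, target, external), three integer flag lines (collapse, keep, keepbranches), then one oldrev:newrev hex pair per rebased changeset. A minimal parser mirroring restorestatus (names are illustrative):

    def parse_rebasestate(data):
        lines = data.splitlines()
        wd, target, external = lines[0:3]
        collapse, keep, keepbranches = [bool(int(l)) for l in lines[3:6]]
        state = dict(l.split(':') for l in lines[6:])
        return wd, target, external, collapse, keep, keepbranches, state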
|
318 | 317 | def clearstatus(repo): |
|
319 | 318 | 'Remove the status files' |
|
320 | 319 | if os.path.exists(repo.join("rebasestate")): |
|
321 | 320 | util.unlink(repo.join("rebasestate")) |
|
322 | 321 | |
|
323 | 322 | def restorestatus(repo): |
|
324 | 323 | 'Restore a previously stored status' |
|
325 | 324 | try: |
|
326 | 325 | target = None |
|
327 | 326 | collapse = False |
|
328 | 327 | external = nullrev |
|
329 | 328 | state = {} |
|
330 | 329 | f = repo.opener("rebasestate") |
|
331 | 330 | for i, l in enumerate(f.read().splitlines()): |
|
332 | 331 | if i == 0: |
|
333 | 332 | originalwd = repo[l].rev() |
|
334 | 333 | elif i == 1: |
|
335 | 334 | target = repo[l].rev() |
|
336 | 335 | elif i == 2: |
|
337 | 336 | external = repo[l].rev() |
|
338 | 337 | elif i == 3: |
|
339 | 338 | collapse = bool(int(l)) |
|
340 | 339 | elif i == 4: |
|
341 | 340 | keep = bool(int(l)) |
|
342 | 341 | elif i == 5: |
|
343 | 342 | keepbranches = bool(int(l)) |
|
344 | 343 | else: |
|
345 | 344 | oldrev, newrev = l.split(':') |
|
346 | 345 | state[repo[oldrev].rev()] = repo[newrev].rev() |
|
347 | 346 | repo.ui.debug(_('rebase status resumed\n')) |
|
348 | 347 | return originalwd, target, state, collapse, keep, keepbranches, external |
|
349 | 348 | except IOError, err: |
|
350 | 349 | if err.errno != errno.ENOENT: |
|
351 | 350 | raise |
|
352 | 351 | raise util.Abort(_('no rebase in progress')) |
|
353 | 352 | |
|
354 | 353 | def abort(repo, originalwd, target, state): |
|
355 | 354 | 'Restore the repository to its original state' |
|
356 | 355 | if set(repo.changelog.descendants(target)) - set(state.values()): |
|
357 | 356 | repo.ui.warn(_("warning: new changesets detected on target branch, " |
|
358 | 357 | "not stripping\n")) |
|
359 | 358 | else: |
|
360 | 359 | # Strip from the first rebased revision |
|
361 | 360 | merge.update(repo, repo[originalwd].rev(), False, True, False) |
|
362 | 361 | rebased = filter(lambda x: x > -1, state.values()) |
|
363 | 362 | if rebased: |
|
364 | 363 | strippoint = min(rebased) |
|
365 | 364 | repair.strip(repo.ui, repo, repo[strippoint].node(), "strip") |
|
366 | 365 | clearstatus(repo) |
|
367 | 366 | repo.ui.status(_('rebase aborted\n')) |
|
368 | 367 | |
|
369 | 368 | def buildstate(repo, dest, src, base, collapse): |
|
370 | 369 | 'Define which revisions are going to be rebased and where' |
|
371 | 370 | targetancestors = set() |
|
372 | 371 | |
|
373 | 372 | if not dest: |
|
374 | 373 | # Destination defaults to the latest revision in the current branch |
|
375 | 374 | branch = repo[None].branch() |
|
376 | 375 | dest = repo[branch].rev() |
|
377 | 376 | else: |
|
378 | 377 | if 'qtip' in repo.tags() and (repo[dest].hex() in |
|
379 | 378 | [s.rev for s in repo.mq.applied]): |
|
380 | 379 | raise util.Abort(_('cannot rebase onto an applied mq patch')) |
|
381 | 380 | dest = repo[dest].rev() |
|
382 | 381 | |
|
383 | 382 | if src: |
|
384 | 383 | commonbase = repo[src].ancestor(repo[dest]) |
|
385 | 384 | if commonbase == repo[src]: |
|
386 | 385 | raise util.Abort(_('cannot rebase an ancestor')) |
|
387 | 386 | if commonbase == repo[dest]: |
|
388 | 387 | raise util.Abort(_('cannot rebase a descendant')) |
|
389 | 388 | source = repo[src].rev() |
|
390 | 389 | else: |
|
391 | 390 | if base: |
|
392 | 391 | cwd = repo[base].rev() |
|
393 | 392 | else: |
|
394 | 393 | cwd = repo['.'].rev() |
|
395 | 394 | |
|
396 | 395 | if cwd == dest: |
|
397 | 396 | repo.ui.debug(_('already working on current\n')) |
|
398 | 397 | return None |
|
399 | 398 | |
|
400 | 399 | targetancestors = set(repo.changelog.ancestors(dest)) |
|
401 | 400 | if cwd in targetancestors: |
|
402 | 401 | repo.ui.debug(_('already working on the current branch\n')) |
|
403 | 402 | return None |
|
404 | 403 | |
|
405 | 404 | cwdancestors = set(repo.changelog.ancestors(cwd)) |
|
406 | 405 | cwdancestors.add(cwd) |
|
407 | 406 | rebasingbranch = cwdancestors - targetancestors |
|
408 | 407 | source = min(rebasingbranch) |
|
409 | 408 | |
|
410 | 409 | repo.ui.debug(_('rebase onto %d starting from %d\n') % (dest, source)) |
|
411 | 410 | state = dict.fromkeys(repo.changelog.descendants(source), nullrev) |
|
412 | 411 | external = nullrev |
|
413 | 412 | if collapse: |
|
414 | 413 | if not targetancestors: |
|
415 | 414 | targetancestors = set(repo.changelog.ancestors(dest)) |
|
416 | 415 | for rev in state: |
|
417 | 416 | # Check externals and fail if there are more than one |
|
418 | 417 | for p in repo[rev].parents(): |
|
419 | 418 | if (p.rev() not in state and p.rev() != source |
|
420 | 419 | and p.rev() not in targetancestors): |
|
421 | 420 | if external != nullrev: |
|
422 | 421 | raise util.Abort(_('unable to collapse, there is more ' |
|
423 | 422 | 'than one external parent')) |
|
424 | 423 | external = p.rev() |
|
425 | 424 | |
|
426 | 425 | state[source] = nullrev |
|
427 | 426 | return repo['.'].rev(), repo[dest].rev(), state, external |
|
428 | 427 | |
|
429 | 428 | def pullrebase(orig, ui, repo, *args, **opts): |
|
430 | 429 | 'Call rebase after pull if the latter has been invoked with --rebase' |
|
431 | 430 | if opts.get('rebase'): |
|
432 | 431 | if opts.get('update'): |
|
433 | 432 | del opts['update'] |
|
434 | 433 | ui.debug(_('--update and --rebase are not compatible, ignoring ' |
|
435 | 434 | 'the update flag\n')) |
|
436 | 435 | |
|
437 | 436 | cmdutil.bail_if_changed(repo) |
|
438 | 437 | revsprepull = len(repo) |
|
439 | 438 | orig(ui, repo, *args, **opts) |
|
440 | 439 | revspostpull = len(repo) |
|
441 | 440 | if revspostpull > revsprepull: |
|
442 | 441 | rebase(ui, repo, **opts) |
|
443 | 442 | branch = repo[None].branch() |
|
444 | 443 | dest = repo[branch].rev() |
|
445 | 444 | if dest != repo['.'].rev(): |
|
446 | 445 | # there was nothing to rebase, so force an update |
|
447 | 446 | merge.update(repo, dest, False, False, False) |
|
448 | 447 | else: |
|
449 | 448 | orig(ui, repo, *args, **opts) |
|
450 | 449 | |
|
451 | 450 | def uisetup(ui): |
|
452 | 451 | 'Replace pull with a decorator to provide --rebase option' |
|
453 | 452 | entry = extensions.wrapcommand(commands.table, 'pull', pullrebase) |
|
454 | 453 | entry[1].append(('', 'rebase', None, |
|
455 | 454 | _("rebase working directory to branch head")) |
|
456 | 455 | ) |
|
457 | 456 | |
|
458 | 457 | cmdtable = { |
|
459 | 458 | "rebase": |
|
460 | 459 | (rebase, |
|
461 | 460 | [ |
|
462 | 461 | ('s', 'source', '', _('rebase from a given revision')), |
|
463 | 462 | ('b', 'base', '', _('rebase from the base of a given revision')), |
|
464 | 463 | ('d', 'dest', '', _('rebase onto a given revision')), |
|
465 | 464 | ('', 'collapse', False, _('collapse the rebased revisions')), |
|
466 | 465 | ('', 'keep', False, _('keep original revisions')), |
|
467 | 466 | ('', 'keepbranches', False, _('keep original branches')), |
|
468 | 467 | ('c', 'continue', False, _('continue an interrupted rebase')), |
|
469 | 468 | ('a', 'abort', False, _('abort an interrupted rebase')),] + |
|
470 | 469 | templateopts, |
|
471 | 470 | _('hg rebase [-s REV | -b REV] [-d REV] [--collapse] [--keep] ' |
|
472 | 471 | '[--keepbranches] | [-c] | [-a]')), |
|
473 | 472 | } |
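
Typical invocations of the command defined by this table (the revision numbers are illustrative):

    $ hg rebase -s 42 -d tip          # graft 42 and its descendants onto tip
    $ hg rebase -b 42 --collapse      # rebase 42's whole branch as one changeset
    $ hg rebase --continue            # resume after fixing conflicts (hg resolve)
    $ hg rebase --abort               # strip the partial result, restore the repo
    $ hg pull --rebase                # pull, then rebase local work (see pullrebase)
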
@@ -1,1369 +1,1369 @@
|
1 | 1 | """ |
|
2 | 2 | revlog.py - storage back-end for mercurial |
|
3 | 3 | |
|
4 | 4 | This provides efficient delta storage with O(1) retrieve and append |
|
5 | 5 | and O(changes) merge between branches |
|
6 | 6 | |
|
7 | 7 | Copyright 2005-2007 Matt Mackall <mpm@selenic.com> |
|
8 | 8 | |
|
9 | 9 | This software may be used and distributed according to the terms |
|
10 | 10 | of the GNU General Public License, incorporated herein by reference. |
|
11 | 11 | """ |
|
12 | 12 | |
|
13 | 13 | # import stuff from node for others to import from revlog |
|
14 | 14 | from node import bin, hex, nullid, nullrev, short #@UnusedImport |
|
15 | 15 | from i18n import _ |
|
16 | 16 | import changegroup, errno, ancestor, mdiff, parsers |
|
17 | 17 | import struct, util, zlib, error |
|
18 | 18 | |
|
19 | 19 | _pack = struct.pack |
|
20 | 20 | _unpack = struct.unpack |
|
21 | 21 | _compress = zlib.compress |
|
22 | 22 | _decompress = zlib.decompress |
|
23 | 23 | _sha = util.sha1 |
|
24 | 24 | |
|
25 | 25 | # revlog flags |
|
26 | 26 | REVLOGV0 = 0 |
|
27 | 27 | REVLOGNG = 1 |
|
28 | 28 | REVLOGNGINLINEDATA = (1 << 16) |
|
29 | 29 | REVLOG_DEFAULT_FLAGS = REVLOGNGINLINEDATA |
|
30 | 30 | REVLOG_DEFAULT_FORMAT = REVLOGNG |
|
31 | 31 | REVLOG_DEFAULT_VERSION = REVLOG_DEFAULT_FORMAT | REVLOG_DEFAULT_FLAGS |
|
32 | 32 | |
|
33 | 33 | RevlogError = error.RevlogError |
|
34 | 34 | LookupError = error.LookupError |
|
35 | 35 | |
|
36 | 36 | def getoffset(q): |
|
37 | 37 | return int(q >> 16) |
|
38 | 38 | |
|
39 | 39 | def gettype(q): |
|
40 | 40 | return int(q & 0xFFFF) |
|
41 | 41 | |
|
42 | 42 | def offset_type(offset, type): |
|
43 | 43 | return long(long(offset) << 16 | type) |
|
44 | 44 | |
|
45 | 45 | nullhash = _sha(nullid) |
|
46 | 46 | |
|
47 | 47 | def hash(text, p1, p2): |
|
48 | 48 | """generate a hash from the given text and its parent hashes |
|
49 | 49 | |
|
50 | 50 | This hash combines both the current file contents and its history |
|
51 | 51 | in a manner that makes it easy to distinguish nodes with the same |
|
52 | 52 | content in the revision graph. |
|
53 | 53 | """ |
|
54 | 54 | # As of now, if one of the parent nodes is null, p2 is null |
|
55 | 55 | if p2 == nullid: |
|
56 | 56 | # deep copy of a hash is faster than creating one |
|
57 | 57 | s = nullhash.copy() |
|
58 | 58 | s.update(p1) |
|
59 | 59 | else: |
|
60 | 60 | # none of the parent nodes are nullid |
|
61 | 61 | l = [p1, p2] |
|
62 | 62 | l.sort() |
|
63 | 63 | s = _sha(l[0]) |
|
64 | 64 | s.update(l[1]) |
|
65 | 65 | s.update(text) |
|
66 | 66 | return s.digest() |
|
67 | 67 | |
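
Note: the fast path above is equivalent to hashing nullid first, because nullid (20 zero bytes) always sorts before any other node. An equivalent standalone version using hashlib:

    import hashlib

    nullid = "\x00" * 20

    def nodehash(text, p1, p2=nullid):
        a, b = sorted([p1, p2])    # nullid, being all zeros, sorts first
        return hashlib.sha1(a + b + text).digest()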
|
68 | 68 | def compress(text): |
|
69 | 69 | """ generate a possibly-compressed representation of text """ |
|
70 | 70 | if not text: |
|
71 | 71 | return ("", text) |
|
72 | 72 | l = len(text) |
|
73 | 73 | bin = None |
|
74 | 74 | if l < 44: |
|
75 | 75 | pass |
|
76 | 76 | elif l > 1000000: |
|
77 | 77 | # zlib makes an internal copy, thus doubling memory usage for |
|
78 | 78 | # large files, so let's do this in pieces |
|
79 | 79 | z = zlib.compressobj() |
|
80 | 80 | p = [] |
|
81 | 81 | pos = 0 |
|
82 | 82 | while pos < l: |
|
83 | 83 | pos2 = pos + 2**20 |
|
84 | 84 | p.append(z.compress(text[pos:pos2])) |
|
85 | 85 | pos = pos2 |
|
86 | 86 | p.append(z.flush()) |
|
87 | 87 | if sum(map(len, p)) < l: |
|
88 | 88 | bin = "".join(p) |
|
89 | 89 | else: |
|
90 | 90 | bin = _compress(text) |
|
91 | 91 | if bin is None or len(bin) > l: |
|
92 | 92 | if text[0] == '\0': |
|
93 | 93 | return ("", text) |
|
94 | 94 | return ('u', text) |
|
95 | 95 | return ("", bin) |
|
96 | 96 | |
|
97 | 97 | def decompress(bin): |
|
98 | 98 | """ decompress the given input """ |
|
99 | 99 | if not bin: |
|
100 | 100 | return bin |
|
101 | 101 | t = bin[0] |
|
102 | 102 | if t == '\0': |
|
103 | 103 | return bin |
|
104 | 104 | if t == 'x': |
|
105 | 105 | return _decompress(bin) |
|
106 | 106 | if t == 'u': |
|
107 | 107 | return bin[1:] |
|
108 | 108 | raise RevlogError(_("unknown compression type %r") % t) |
|
109 | 109 | |
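
Note: a round trip of the tagging scheme, using the two functions above; the revlog stores the returned header and payload concatenated:

    hdr, data = compress("some revision text " * 4)
    stored = hdr + data                 # 'u' + raw text, or zlib data ('x...')
    assert decompress(stored) == "some revision text " * 4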
|
110 | 110 | class lazyparser(object): |
|
111 | 111 | """ |
|
112 | 112 | this class avoids the need to parse the entirety of large indices |
|
113 | 113 | """ |
|
114 | 114 | |
|
115 | 115 | # lazyparser is not safe to use on windows if win32 extensions not |
|
116 | 116 | # available. it keeps a file handle open, which makes it impossible |
|
117 | 117 | # to break hardlinks on local cloned repos. |
|
118 | 118 | |
|
119 | 119 | def __init__(self, dataf, size): |
|
120 | 120 | self.dataf = dataf |
|
121 | 121 | self.s = struct.calcsize(indexformatng) |
|
122 | 122 | self.datasize = size |
|
123 | 123 | self.l = size/self.s |
|
124 | 124 | self.index = [None] * self.l |
|
125 | 125 | self.map = {nullid: nullrev} |
|
126 | 126 | self.allmap = 0 |
|
127 | 127 | self.all = 0 |
|
128 | 128 | self.mapfind_count = 0 |
|
129 | 129 | |
|
130 | 130 | def loadmap(self): |
|
131 | 131 | """ |
|
132 | 132 | during a commit, we need to make sure the rev being added is |
|
133 | 133 | not a duplicate. This requires loading the entire index, |
|
134 | 134 | which is fairly slow. loadmap can load up just the node map, |
|
135 | 135 | which takes much less time. |
|
136 | 136 | """ |
|
137 | 137 | if self.allmap: |
|
138 | 138 | return |
|
139 | 139 | end = self.datasize |
|
140 | 140 | self.allmap = 1 |
|
141 | 141 | cur = 0 |
|
142 | 142 | count = 0 |
|
143 | 143 | blocksize = self.s * 256 |
|
144 | 144 | self.dataf.seek(0) |
|
145 | 145 | while cur < end: |
|
146 | 146 | data = self.dataf.read(blocksize) |
|
147 | 147 | off = 0 |
|
148 | 148 | for x in xrange(256): |
|
149 | 149 | n = data[off + ngshaoffset:off + ngshaoffset + 20] |
|
150 | 150 | self.map[n] = count |
|
151 | 151 | count += 1 |
|
152 | 152 | if count >= self.l: |
|
153 | 153 | break |
|
154 | 154 | off += self.s |
|
155 | 155 | cur += blocksize |
|
156 | 156 | |
|
157 | 157 | def loadblock(self, blockstart, blocksize, data=None): |
|
158 | 158 | if self.all: |
|
159 | 159 | return |
|
160 | 160 | if data is None: |
|
161 | 161 | self.dataf.seek(blockstart) |
|
162 | 162 | if blockstart + blocksize > self.datasize: |
|
163 | 163 | # the revlog may have grown since we've started running, |
|
164 | 164 | # but we don't have space in self.index for more entries. |
|
165 | 165 | # limit blocksize so that we don't get too much data. |
|
166 | 166 | blocksize = max(self.datasize - blockstart, 0) |
|
167 | 167 | data = self.dataf.read(blocksize) |
|
168 | 168 | lend = len(data) / self.s |
|
169 | 169 | i = blockstart / self.s |
|
170 | 170 | off = 0 |
|
171 | 171 | # lazyindex supports __delitem__ |
|
172 | 172 | if lend > len(self.index) - i: |
|
173 | 173 | lend = len(self.index) - i |
|
174 | 174 | for x in xrange(lend): |
|
175 | 175 | if self.index[i + x] == None: |
|
176 | 176 | b = data[off : off + self.s] |
|
177 | 177 | self.index[i + x] = b |
|
178 | 178 | n = b[ngshaoffset:ngshaoffset + 20] |
|
179 | 179 | self.map[n] = i + x |
|
180 | 180 | off += self.s |
|
181 | 181 | |
|
182 | 182 | def findnode(self, node): |
|
183 | 183 | """search backwards through the index file for a specific node""" |
|
184 | 184 | if self.allmap: |
|
185 | 185 | return None |
|
186 | 186 | |
|
187 | 187 | # hg log will cause many many searches for the manifest |
|
188 | 188 | # nodes. After we get called a few times, just load the whole |
|
189 | 189 | # thing. |
|
190 | 190 | if self.mapfind_count > 8: |
|
191 | 191 | self.loadmap() |
|
192 | 192 | if node in self.map: |
|
193 | 193 | return node |
|
194 | 194 | return None |
|
195 | 195 | self.mapfind_count += 1 |
|
196 | 196 | last = self.l - 1 |
|
197 | 197 | while self.index[last] != None: |
|
198 | 198 | if last == 0: |
|
199 | 199 | self.all = 1 |
|
200 | 200 | self.allmap = 1 |
|
201 | 201 | return None |
|
202 | 202 | last -= 1 |
|
203 | 203 | end = (last + 1) * self.s |
|
204 | 204 | blocksize = self.s * 256 |
|
205 | 205 | while end >= 0: |
|
206 | 206 | start = max(end - blocksize, 0) |
|
207 | 207 | self.dataf.seek(start) |
|
208 | 208 | data = self.dataf.read(end - start) |
|
209 | 209 | findend = end - start |
|
210 | 210 | while True: |
|
211 | 211 | # we're searching backwards, so we have to make sure |
|
212 | 212 | # we don't find a changeset where this node is a parent |
|
213 | 213 | off = data.find(node, 0, findend) |
|
214 | 214 | findend = off |
|
215 | 215 | if off >= 0: |
|
216 | 216 | i = off / self.s |
|
217 | 217 | off = i * self.s |
|
218 | 218 | n = data[off + ngshaoffset:off + ngshaoffset + 20] |
|
219 | 219 | if n == node: |
|
220 | 220 | self.map[n] = i + start / self.s |
|
221 | 221 | return node |
|
222 | 222 | else: |
|
223 | 223 | break |
|
224 | 224 | end -= blocksize |
|
225 | 225 | return None |
|
226 | 226 | |
|
227 | 227 | def loadindex(self, i=None, end=None): |
|
228 | 228 | if self.all: |
|
229 | 229 | return |
|
230 | 230 | all = False |
|
231 | 231 | if i == None: |
|
232 | 232 | blockstart = 0 |
|
233 | 233 | blocksize = (65536 / self.s) * self.s |
|
234 | 234 | end = self.datasize |
|
235 | 235 | all = True |
|
236 | 236 | else: |
|
237 | 237 | if end: |
|
238 | 238 | blockstart = i * self.s |
|
239 | 239 | end = end * self.s |
|
240 | 240 | blocksize = end - blockstart |
|
241 | 241 | else: |
|
242 | 242 | blockstart = (i & ~1023) * self.s |
|
243 | 243 | blocksize = self.s * 1024 |
|
244 | 244 | end = blockstart + blocksize |
|
245 | 245 | while blockstart < end: |
|
246 | 246 | self.loadblock(blockstart, blocksize) |
|
247 | 247 | blockstart += blocksize |
|
248 | 248 | if all: |
|
249 | 249 | self.all = True |
|
250 | 250 | |
|
251 | 251 | class lazyindex(object): |
|
252 | 252 | """a lazy version of the index array""" |
|
253 | 253 | def __init__(self, parser): |
|
254 | 254 | self.p = parser |
|
255 | 255 | def __len__(self): |
|
256 | 256 | return len(self.p.index) |
|
257 | 257 | def load(self, pos): |
|
258 | 258 | if pos < 0: |
|
259 | 259 | pos += len(self.p.index) |
|
260 | 260 | self.p.loadindex(pos) |
|
261 | 261 | return self.p.index[pos] |
|
262 | 262 | def __getitem__(self, pos): |
|
263 | 263 | return _unpack(indexformatng, self.p.index[pos] or self.load(pos)) |
|
264 | 264 | def __setitem__(self, pos, item): |
|
265 | 265 | self.p.index[pos] = _pack(indexformatng, *item) |
|
266 | 266 | def __delitem__(self, pos): |
|
267 | 267 | del self.p.index[pos] |
|
268 | 268 | def insert(self, pos, e): |
|
269 | 269 | self.p.index.insert(pos, _pack(indexformatng, *e)) |
|
270 | 270 | def append(self, e): |
|
271 | 271 | self.p.index.append(_pack(indexformatng, *e)) |
|
272 | 272 | |
|
273 | 273 | class lazymap(object): |
|
274 | 274 | """a lazy version of the node map""" |
|
275 | 275 | def __init__(self, parser): |
|
276 | 276 | self.p = parser |
|
277 | 277 | def load(self, key): |
|
278 | 278 | n = self.p.findnode(key) |
|
279 | 279 | if n == None: |
|
280 | 280 | raise KeyError(key) |
|
281 | 281 | def __contains__(self, key): |
|
282 | 282 | if key in self.p.map: |
|
283 | 283 | return True |
|
284 | 284 | self.p.loadmap() |
|
285 | 285 | return key in self.p.map |
|
286 | 286 | def __iter__(self): |
|
287 | 287 | yield nullid |
|
288 | 288 | for i in xrange(self.p.l): |
|
289 | 289 | ret = self.p.index[i] |
|
290 | 290 | if not ret: |
|
291 | 291 | self.p.loadindex(i) |
|
292 | 292 | ret = self.p.index[i] |
|
293 | 293 | if isinstance(ret, str): |
|
294 | 294 | ret = _unpack(indexformatng, ret) |
|
295 | 295 | yield ret[7] |
|
296 | 296 | def __getitem__(self, key): |
|
297 | 297 | try: |
|
298 | 298 | return self.p.map[key] |
|
299 | 299 | except KeyError: |
|
300 | 300 | try: |
|
301 | 301 | self.load(key) |
|
302 | 302 | return self.p.map[key] |
|
303 | 303 | except KeyError: |
|
304 | 304 | raise KeyError("node " + hex(key)) |
|
305 | 305 | def __setitem__(self, key, val): |
|
306 | 306 | self.p.map[key] = val |
|
307 | 307 | def __delitem__(self, key): |
|
308 | 308 | del self.p.map[key] |
|
309 | 309 | |
|
310 | 310 | indexformatv0 = ">4l20s20s20s" |
|
311 | 311 | v0shaoffset = 56 |
|
312 | 312 | |
|
313 | 313 | class revlogoldio(object): |
|
314 | 314 | def __init__(self): |
|
315 | 315 | self.size = struct.calcsize(indexformatv0) |
|
316 | 316 | |
|
317 | 317 | def parseindex(self, fp, inline): |
|
318 | 318 | s = self.size |
|
319 | 319 | index = [] |
|
320 | 320 | nodemap = {nullid: nullrev} |
|
321 | 321 | n = off = 0 |
|
322 | 322 | data = fp.read() |
|
323 | 323 | l = len(data) |
|
324 | 324 | while off + s <= l: |
|
325 | 325 | cur = data[off:off + s] |
|
326 | 326 | off += s |
|
327 | 327 | e = _unpack(indexformatv0, cur) |
|
328 | 328 | # transform to revlogv1 format |
|
329 | 329 | e2 = (offset_type(e[0], 0), e[1], -1, e[2], e[3], |
|
330 | 330 | nodemap.get(e[4], nullrev), nodemap.get(e[5], nullrev), e[6]) |
|
331 | 331 | index.append(e2) |
|
332 | 332 | nodemap[e[6]] = n |
|
333 | 333 | n += 1 |
|
334 | 334 | |
|
335 | 335 | return index, nodemap, None |
|
336 | 336 | |
|
337 | 337 | def packentry(self, entry, node, version, rev): |
|
338 | 338 | e2 = (getoffset(entry[0]), entry[1], entry[3], entry[4], |
|
339 | 339 | node(entry[5]), node(entry[6]), entry[7]) |
|
340 | 340 | return _pack(indexformatv0, *e2) |
|
341 | 341 | |
|
342 | 342 | # index ng: |
|
343 | 343 | # 6 bytes offset |
|
344 | 344 | # 2 bytes flags |
|
345 | 345 | # 4 bytes compressed length |
|
346 | 346 | # 4 bytes uncompressed length |
|
347 | 347 | # 4 bytes: base rev |
|
348 | 348 | # 4 bytes link rev |
|
349 | 349 | # 4 bytes parent 1 rev |
|
350 | 350 | # 4 bytes parent 2 rev |
|
351 | 351 | # 32 bytes: nodeid |
|
352 | 352 | indexformatng = ">Qiiiiii20s12x" |
|
353 | 353 | ngshaoffset = 32 |
|
354 | 354 | versionformat = ">I" |
|
355 | 355 | |
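
Note: a quick sanity check of the 64-byte record layout described above, using the offset_type/getoffset/gettype helpers defined earlier in this file (the field values are illustrative):

    import struct
    e = (offset_type(0, 0), 11, 24, 0, 5, 3, -1, "\xaa" * 20)
    raw = struct.pack(indexformatng, *e)
    assert len(raw) == 64               # 20-byte nodeid padded to 32 bytes
    entry = struct.unpack(indexformatng, raw)
    print getoffset(entry[0]), gettype(entry[0])   # data offset and flags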
|
356 | 356 | class revlogio(object): |
|
357 | 357 | def __init__(self): |
|
358 | 358 | self.size = struct.calcsize(indexformatng) |
|
359 | 359 | |
|
360 | 360 | def parseindex(self, fp, inline): |
|
361 | 361 | try: |
|
362 | 362 | size = util.fstat(fp).st_size |
|
363 | 363 | except AttributeError: |
|
364 | 364 | size = 0 |
|
365 | 365 | |
|
366 | 366 | if util.openhardlinks() and not inline and size > 1000000: |
|
367 | 367 | # big index, let's parse it on demand |
|
368 | 368 | parser = lazyparser(fp, size) |
|
369 | 369 | index = lazyindex(parser) |
|
370 | 370 | nodemap = lazymap(parser) |
|
371 | 371 | e = list(index[0]) |
|
372 | 372 | type = gettype(e[0]) |
|
373 | 373 | e[0] = offset_type(0, type) |
|
374 | 374 | index[0] = e |
|
375 | 375 | return index, nodemap, None |
|
376 | 376 | |
|
377 | 377 | data = fp.read() |
|
378 | 378 | # call the C implementation to parse the index data |
|
379 | 379 | index, nodemap, cache = parsers.parse_index(data, inline) |
|
380 | 380 | return index, nodemap, cache |
|
381 | 381 | |
|
382 | 382 | def packentry(self, entry, node, version, rev): |
|
383 | 383 | p = _pack(indexformatng, *entry) |
|
384 | 384 | if rev == 0: |
|
385 | 385 | p = _pack(versionformat, version) + p[4:] |
|
386 | 386 | return p |
|
387 | 387 | |
|
388 | 388 | class revlog(object): |
|
389 | 389 | """ |
|
390 | 390 | the underlying revision storage object |
|
391 | 391 | |
|
392 | 392 | A revlog consists of two parts, an index and the revision data. |
|
393 | 393 | |
|
394 | 394 | The index is a file with a fixed record size containing |
|
395 | 395 | information on each revision, including its nodeid (hash), the |
|
396 | 396 | nodeids of its parents, the position and offset of its data within |
|
397 | 397 | the data file, and the revision it's based on. Finally, each entry |
|
398 | 398 | contains a linkrev entry that can serve as a pointer to external |
|
399 | 399 | data. |
|
400 | 400 | |
|
401 | 401 | The revision data itself is a linear collection of data chunks. |
|
402 | 402 | Each chunk represents a revision and is usually represented as a |
|
403 | 403 | delta against the previous chunk. To bound lookup time, runs of |
|
404 | 404 | deltas are limited to about 2 times the length of the original |
|
405 | 405 | version data. This makes retrieval of a version proportional to |
|
406 | 406 | its size, or O(1) relative to the number of revisions. |
|
407 | 407 | |
|
408 | 408 | Both pieces of the revlog are written to in an append-only |
|
409 | 409 | fashion, which means we never need to rewrite a file to insert or |
|
410 | 410 | remove data, and can use some simple techniques to avoid the need |
|
411 | 411 | for locking while reading. |
|
412 | 412 | """ |
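
Note: rebuilding one revision from its delta run boils down to the sketch below; it assumes mercurial's mdiff.patches helper and mirrors revlog.revision, which falls outside this excerpt:

    from mercurial import mdiff

    def rebuild(rl, rev):
        base = rl.base(rev)                 # first full snapshot of the run
        text = rl.chunk(base)               # chunk() already decompresses
        bins = [rl.chunk(r) for r in xrange(base + 1, rev + 1)]
        return mdiff.patches(text, bins)    # apply the deltas in order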
|
413 | 413 | def __init__(self, opener, indexfile): |
|
414 | 414 | """ |
|
415 | 415 | create a revlog object |
|
416 | 416 | |
|
417 | 417 | opener is a function that abstracts the file opening operation |
|
418 | 418 | and can be used to implement COW semantics or the like. |
|
419 | 419 | """ |
|
420 | 420 | self.indexfile = indexfile |
|
421 | 421 | self.datafile = indexfile[:-2] + ".d" |
|
422 | 422 | self.opener = opener |
|
423 | 423 | self._cache = None |
|
424 | 424 | self._chunkcache = None |
|
425 | 425 | self.nodemap = {nullid: nullrev} |
|
426 | 426 | self.index = [] |
|
427 | 427 | |
|
428 | 428 | v = REVLOG_DEFAULT_VERSION |
|
429 | 429 | if hasattr(opener, "defversion"): |
|
430 | 430 | v = opener.defversion |
|
431 | 431 | if v & REVLOGNG: |
|
432 | 432 | v |= REVLOGNGINLINEDATA |
|
433 | 433 | |
|
434 | 434 | i = "" |
|
435 | 435 | try: |
|
436 | 436 | f = self.opener(self.indexfile) |
|
437 | 437 | i = f.read(4) |
|
438 | 438 | f.seek(0) |
|
439 | 439 | if len(i) > 0: |
|
440 | 440 | v = struct.unpack(versionformat, i)[0] |
|
441 | 441 | except IOError, inst: |
|
442 | 442 | if inst.errno != errno.ENOENT: |
|
443 | 443 | raise |
|
444 | 444 | |
|
445 | 445 | self.version = v |
|
446 | 446 | self._inline = v & REVLOGNGINLINEDATA |
|
447 | 447 | flags = v & ~0xFFFF |
|
448 | 448 | fmt = v & 0xFFFF |
|
449 | 449 | if fmt == REVLOGV0 and flags: |
|
450 | 450 | raise RevlogError(_("index %s unknown flags %#04x for format v0") |
|
451 | 451 | % (self.indexfile, flags >> 16)) |
|
452 | 452 | elif fmt == REVLOGNG and flags & ~REVLOGNGINLINEDATA: |
|
453 | 453 | raise RevlogError(_("index %s unknown flags %#04x for revlogng") |
|
454 | 454 | % (self.indexfile, flags >> 16)) |
|
455 | 455 | elif fmt > REVLOGNG: |
|
456 | 456 | raise RevlogError(_("index %s unknown format %d") |
|
457 | 457 | % (self.indexfile, fmt)) |
|
458 | 458 | |
|
459 | 459 | self._io = revlogio() |
|
460 | 460 | if self.version == REVLOGV0: |
|
461 | 461 | self._io = revlogoldio() |
|
462 | 462 | if i: |
|
463 | 463 | try: |
|
464 | 464 | d = self._io.parseindex(f, self._inline) |
|
465 | 465 | except (ValueError, IndexError), e: |
|
466 | 466 | raise RevlogError(_("index %s is corrupted") % (self.indexfile)) |
|
467 | 467 | self.index, self.nodemap, self._chunkcache = d |
|
468 | 468 | |
|
469 | 469 | # add the magic null revision at -1 (if it hasn't been done already) |
|
470 | 470 | if (self.index == [] or isinstance(self.index, lazyindex) or |
|
471 | 471 | self.index[-1][7] != nullid) : |
|
472 | 472 | self.index.append((0, 0, 0, -1, -1, -1, -1, nullid)) |
|
473 | 473 | |
|
474 | 474 | def _loadindex(self, start, end): |
|
475 | 475 | """load a block of indexes all at once from the lazy parser""" |
|
476 | 476 | if isinstance(self.index, lazyindex): |
|
477 | 477 | self.index.p.loadindex(start, end) |
|
478 | 478 | |
|
479 | 479 | def _loadindexmap(self): |
|
480 | 480 | """loads both the map and the index from the lazy parser""" |
|
481 | 481 | if isinstance(self.index, lazyindex): |
|
482 | 482 | p = self.index.p |
|
483 | 483 | p.loadindex() |
|
484 | 484 | self.nodemap = p.map |
|
485 | 485 | |
|
486 | 486 | def _loadmap(self): |
|
487 | 487 | """loads the map from the lazy parser""" |
|
488 | 488 | if isinstance(self.nodemap, lazymap): |
|
489 | 489 | self.nodemap.p.loadmap() |
|
490 | 490 | self.nodemap = self.nodemap.p.map |
|
491 | 491 | |
|
492 | 492 | def tip(self): |
|
493 | 493 | return self.node(len(self.index) - 2) |
|
494 | 494 | def __len__(self): |
|
495 | 495 | return len(self.index) - 1 |
|
496 | 496 | def __iter__(self): |
|
497 | 497 | for i in xrange(len(self)): |
|
498 | 498 | yield i |
|
499 | 499 | def rev(self, node): |
|
500 | 500 | try: |
|
501 | 501 | return self.nodemap[node] |
|
502 | 502 | except KeyError: |
|
503 | 503 | raise LookupError(node, self.indexfile, _('no node')) |
|
504 | 504 | def node(self, rev): |
|
505 | 505 | return self.index[rev][7] |
|
506 | 506 | def linkrev(self, rev): |
|
507 | 507 | return self.index[rev][4] |
|
508 | 508 | def parents(self, node): |
|
509 | 509 | i = self.index |
|
510 | 510 | d = i[self.rev(node)] |
|
511 | 511 | return i[d[5]][7], i[d[6]][7] # map revisions to nodes inline |
|
512 | 512 | def parentrevs(self, rev): |
|
513 | 513 | return self.index[rev][5:7] |
|
514 | 514 | def start(self, rev): |
|
515 | 515 | return int(self.index[rev][0] >> 16) |
|
516 | 516 | def end(self, rev): |
|
517 | 517 | return self.start(rev) + self.length(rev) |
|
518 | 518 | def length(self, rev): |
|
519 | 519 | return self.index[rev][1] |
|
520 | 520 | def base(self, rev): |
|
521 | 521 | return self.index[rev][3] |
|
522 | 522 | |
|
523 | 523 | def size(self, rev): |
|
524 | 524 | """return the length of the uncompressed text for a given revision""" |
|
525 | 525 | l = self.index[rev][2] |
|
526 | 526 | if l >= 0: |
|
527 | 527 | return l |
|
528 | 528 | |
|
529 | 529 | t = self.revision(self.node(rev)) |
|
530 | 530 | return len(t) |
|
531 | 531 | |
|
532 | 532 | # alternate implementation. The advantage of this code is that it |
|
533 | 533 | # will be faster for a single revision. But, the results are not |
|
534 | 534 | # cached, so finding the size of every revision will be slower. |
|
535 | 535 | """ |
|
536 | 536 | if self.cache and self.cache[1] == rev: |
|
537 | 537 | return len(self.cache[2]) |
|
538 | 538 | |
|
539 | 539 | base = self.base(rev) |
|
540 | 540 | if self.cache and self.cache[1] >= base and self.cache[1] < rev: |
|
541 | 541 | base = self.cache[1] |
|
542 | 542 | text = self.cache[2] |
|
543 | 543 | else: |
|
544 | 544 | text = self.revision(self.node(base)) |
|
545 | 545 | |
|
546 | 546 | l = len(text) |
|
547 | 547 | for x in xrange(base + 1, rev + 1): |
|
548 | 548 | l = mdiff.patchedsize(l, self.chunk(x)) |
|
549 | 549 | return l |
|
550 | 550 | """ |
|
551 | 551 | |
|
552 | 552 | def reachable(self, node, stop=None): |
|
553 | 553 | """return a hash of all nodes ancestral to a given node, including |
|
554 | 554 | the node itself, stopping when stop is matched""" |
|
555 | 555 | reachable = {} |
|
556 | 556 | visit = [node] |
|
557 | 557 | reachable[node] = 1 |
|
558 | 558 | if stop: |
|
559 | 559 | stopn = self.rev(stop) |
|
560 | 560 | else: |
|
561 | 561 | stopn = 0 |
|
562 | 562 | while visit: |
|
563 | 563 | n = visit.pop(0) |
|
564 | 564 | if n == stop: |
|
565 | 565 | continue |
|
566 | 566 | if n == nullid: |
|
567 | 567 | continue |
|
568 | 568 | for p in self.parents(n): |
|
569 | 569 | if self.rev(p) < stopn: |
|
570 | 570 | continue |
|
571 | 571 | if p not in reachable: |
|
572 | 572 | reachable[p] = 1 |
|
573 | 573 | visit.append(p) |
|
574 | 574 | return reachable |
|
575 | 575 | |
|
576 | 576 | def ancestors(self, *revs): |
|
577 | 577 | 'Generate the ancestors of revs using a breadth-first visit' |
|
578 | 578 | visit = list(revs) |
|
579 | 579 | seen = set([nullrev]) |
|
580 | 580 | while visit: |
|
581 | 581 | for parent in self.parentrevs(visit.pop(0)): |
|
582 | 582 | if parent not in seen: |
|
583 | 583 | visit.append(parent) |
|
584 | 584 | seen.add(parent) |
|
585 | 585 | yield parent |
|
586 | 586 | |
|
587 | 587 | def descendants(self, *revs): |
|
588 | 588 | 'Generate the descendants of revs in topological order' |
|
589 | 589 | seen = set(revs) |
|
590 | 590 | for i in xrange(min(revs) + 1, len(self)): |
|
591 | 591 | for x in self.parentrevs(i): |
|
592 | 592 | if x != nullrev and x in seen: |
|
593 | 593 | seen.add(i) |
|
594 | 594 | yield i |
|
595 | 595 | break |
|
596 | 596 | |
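
Note: a repo-free check of the breadth-first walk in ancestors above, using a parentrevs lookup for a tiny illustrative graph (0 <- 1 <- 2, 0 <- 3):

    parentrevs = {0: (-1, -1), 1: (0, -1), 2: (1, -1), 3: (0, -1)}.__getitem__

    def ancestors(*revs):
        visit, seen = list(revs), set([-1])
        while visit:
            for parent in parentrevs(visit.pop(0)):
                if parent not in seen:
                    visit.append(parent)
                    seen.add(parent)
                    yield parent

    print list(ancestors(2, 3))         # -> [1, 0]; each ancestor yielded once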
|
597 | 597 | def findmissing(self, common=None, heads=None): |
|
598 | 598 | ''' |
|
599 | 599 | returns the topologically sorted list of nodes from the set: |
|
600 | 600 | missing = (ancestors(heads) \ ancestors(common)) |
|
601 | 601 | |
|
602 | 602 | where ancestors() is the set of ancestors from heads, heads included |
|
603 | 603 | |
|
604 | 604 | if heads is None, the heads of the revlog are used |
|
605 | 605 | if common is None, nullid is assumed to be a common node |
|
606 | 606 | ''' |
|
607 | 607 | if common is None: |
|
608 | 608 | common = [nullid] |
|
609 | 609 | if heads is None: |
|
610 | 610 | heads = self.heads() |
|
611 | 611 | |
|
612 | 612 | common = [self.rev(n) for n in common] |
|
613 | 613 | heads = [self.rev(n) for n in heads] |
|
614 | 614 | |
|
615 | 615 | # we want the ancestors, but inclusive |
|
616 | 616 | has = set(self.ancestors(*common)) |
|
617 | 617 | has.add(nullrev) |
|
618 | 618 | has.update(common) |
|
619 | 619 | |
|
620 | 620 | # take all ancestors from heads that aren't in has |
|
621 | 621 | missing = {} |
|
622 | 622 | visit = [r for r in heads if r not in has] |
|
623 | 623 | while visit: |
|
624 | 624 | r = visit.pop(0) |
|
625 | 625 | if r in missing: |
|
626 | 626 | continue |
|
627 | 627 | else: |
|
628 | 628 | missing[r] = None |
|
629 | 629 | for p in self.parentrevs(r): |
|
630 | 630 | if p not in has: |
|
631 | 631 | visit.append(p) |
|
632 | 632 | missing = missing.keys() |
|
633 | 633 | missing.sort() |
|
634 | 634 | return [self.node(r) for r in missing] |
|
635 | 635 | |
|
636 | 636 | def nodesbetween(self, roots=None, heads=None): |
|
637 | 637 | """Return a tuple containing three elements. Elements 1 and 2 contain |
|
638 | 638 | a final list of bases and heads after all the unreachable ones have been |
|
639 | 639 | pruned. Element 0 contains a topologically sorted list of all |
|
640 | 640 | |
|
641 | 641 | nodes that satisfy these constraints: |
|
642 | 642 | 1. All nodes must be descended from a node in roots (the nodes on |
|
643 | 643 | roots are considered descended from themselves). |
|
644 | 644 | 2. All nodes must also be ancestors of a node in heads (the nodes in |
|
645 | 645 | heads are considered to be their own ancestors). |
|
646 | 646 | |
|
647 | 647 | If roots is unspecified, nullid is assumed as the only root. |
|
648 | 648 | If heads is unspecified, it is taken to be the output of the |
|
649 | 649 | heads method (i.e. a list of all nodes in the repository that |
|
650 | 650 | have no children).""" |
|
651 | 651 | nonodes = ([], [], []) |
|
652 | 652 | if roots is not None: |
|
653 | 653 | roots = list(roots) |
|
654 | 654 | if not roots: |
|
655 | 655 | return nonodes |
|
656 | 656 | lowestrev = min([self.rev(n) for n in roots]) |
|
657 | 657 | else: |
|
658 | 658 | roots = [nullid] # Everybody's a descendent of nullid |
|
659 | 659 | lowestrev = nullrev |
|
660 | 660 | if (lowestrev == nullrev) and (heads is None): |
|
661 | 661 | # We want _all_ the nodes! |
|
662 | 662 | return ([self.node(r) for r in self], [nullid], list(self.heads())) |
|
663 | 663 | if heads is None: |
|
664 | 664 | # All nodes are ancestors, so the latest ancestor is the last |
|
665 | 665 | # node. |
|
666 | 666 | highestrev = len(self) - 1 |
|
667 | 667 | # Set ancestors to None to signal that every node is an ancestor. |
|
668 | 668 | ancestors = None |
|
669 | 669 | # Set heads to an empty dictionary for later discovery of heads |
|
670 | 670 | heads = {} |
|
671 | 671 | else: |
|
672 | 672 | heads = list(heads) |
|
673 | 673 | if not heads: |
|
674 | 674 | return nonodes |
|
675 | 675 | ancestors = {} |
|
676 | 676 | # Turn heads into a dictionary so we can remove 'fake' heads. |
|
677 | 677 | # Also, later we will be using it to filter out the heads we can't |
|
678 | 678 | # find from roots. |
|
679 | 679 | heads = dict.fromkeys(heads, 0) |
|
680 | 680 | # Start at the top and keep marking parents until we're done. |
|
681 | | nodestotag = set(heads.keys()) |

| 681 | nodestotag = set(heads) |
|
682 | 682 | # Remember where the top was so we can use it as a limit later. |
|
683 | 683 | highestrev = max([self.rev(n) for n in nodestotag]) |
|
684 | 684 | while nodestotag: |
|
685 | 685 | # grab a node to tag |
|
686 | 686 | n = nodestotag.pop() |
|
687 | 687 | # Never tag nullid |
|
688 | 688 | if n == nullid: |
|
689 | 689 | continue |
|
690 | 690 | # A node's revision number represents its place in a |
|
691 | 691 | # topologically sorted list of nodes. |
|
692 | 692 | r = self.rev(n) |
|
693 | 693 | if r >= lowestrev: |
|
694 | 694 | if n not in ancestors: |
|
695 | 695 | # If we are possibly a descendent of one of the roots |
|
696 | 696 | # and we haven't already been marked as an ancestor |
|
697 | 697 | ancestors[n] = 1 # Mark as ancestor |
|
698 | 698 | # Add non-nullid parents to list of nodes to tag. |
|
699 | 699 | nodestotag.update([p for p in self.parents(n) if |
|
700 | 700 | p != nullid]) |
|
701 | 701 | elif n in heads: # We've seen it before, is it a fake head? |
|
702 | 702 | # So it is, real heads should not be the ancestors of |
|
703 | 703 | # any other heads. |
|
704 | 704 | heads.pop(n) |
|
705 | 705 | if not ancestors: |
|
706 | 706 | return nonodes |
|
707 | 707 | # Now that we have our set of ancestors, we want to remove any |
|
708 | 708 | # roots that are not ancestors. |
|
709 | 709 | |
|
710 | 710 | # If one of the roots was nullid, everything is included anyway. |
|
711 | 711 | if lowestrev > nullrev: |
|
712 | 712 | # But, since we weren't, let's recompute the lowest rev to not |
|
713 | 713 | # include roots that aren't ancestors. |
|
714 | 714 | |
|
715 | 715 | # Filter out roots that aren't ancestors of heads |
|
716 | 716 | roots = [n for n in roots if n in ancestors] |
|
717 | 717 | # Recompute the lowest revision |
|
718 | 718 | if roots: |
|
719 | 719 | lowestrev = min([self.rev(n) for n in roots]) |
|
720 | 720 | else: |
|
721 | 721 | # No more roots? Return empty list |
|
722 | 722 | return nonodes |
|
723 | 723 | else: |
|
724 | 724 | # We are descending from nullid, and don't need to care about |
|
725 | 725 | # any other roots. |
|
726 | 726 | lowestrev = nullrev |
|
727 | 727 | roots = [nullid] |
|
728 | 728 | # Transform our roots list into a set. |
|
729 | 729 | descendents = set(roots) |
|
730 | 730 | # Also, keep the original roots so we can filter out roots that aren't |
|
731 | 731 | # 'real' roots (i.e. are descended from other roots). |
|
732 | 732 | roots = descendents.copy() |
|
733 | 733 | # Our topologically sorted list of output nodes. |
|
734 | 734 | orderedout = [] |
|
735 | 735 | # Don't start at nullid since we don't want nullid in our output list, |
|
736 | 736 | # and if nullid shows up in descendents, empty parents will look like |
|
737 | 737 | # they're descendents. |
|
738 | 738 | for r in xrange(max(lowestrev, 0), highestrev + 1): |
|
739 | 739 | n = self.node(r) |
|
740 | 740 | isdescendent = False |
|
741 | 741 | if lowestrev == nullrev: # Everybody is a descendent of nullid |
|
742 | 742 | isdescendent = True |
|
743 | 743 | elif n in descendents: |
|
744 | 744 | # n is already a descendent |
|
745 | 745 | isdescendent = True |
|
746 | 746 | # This check only needs to be done here because all the roots |
|
747 | 747 | # will start being marked as descendents before the loop. |
|
748 | 748 | if n in roots: |
|
749 | 749 | # If n was a root, check if it's a 'real' root. |
|
750 | 750 | p = tuple(self.parents(n)) |
|
751 | 751 | # If any of its parents are descendents, it's not a root. |
|
752 | 752 | if (p[0] in descendents) or (p[1] in descendents): |
|
753 | 753 | roots.remove(n) |
|
754 | 754 | else: |
|
755 | 755 | p = tuple(self.parents(n)) |
|
756 | 756 | # A node is a descendent if either of its parents are |
|
757 | 757 | # descendents. (We seeded the descendents list with the roots |
|
758 | 758 | # up there, remember?) |
|
759 | 759 | if (p[0] in descendents) or (p[1] in descendents): |
|
760 | 760 | descendents.add(n) |
|
761 | 761 | isdescendent = True |
|
762 | 762 | if isdescendent and ((ancestors is None) or (n in ancestors)): |
|
763 | 763 | # Only include nodes that are both descendents and ancestors. |
|
764 | 764 | orderedout.append(n) |
|
765 | 765 | if (ancestors is not None) and (n in heads): |
|
766 | 766 | # We're trying to figure out which heads are reachable |
|
767 | 767 | # from roots. |
|
768 | 768 | # Mark this head as having been reached |
|
769 | 769 | heads[n] = 1 |
|
770 | 770 | elif ancestors is None: |
|
771 | 771 | # Otherwise, we're trying to discover the heads. |
|
772 | 772 | # Assume this is a head because if it isn't, the next step |
|
773 | 773 | # will eventually remove it. |
|
774 | 774 | heads[n] = 1 |
|
775 | 775 | # But, obviously its parents aren't. |
|
776 | 776 | for p in self.parents(n): |
|
777 | 777 | heads.pop(p, None) |
|
778 | 778 | heads = [n for n in heads.iterkeys() if heads[n] != 0] |
|
779 | 779 | roots = list(roots) |
|
780 | 780 | assert orderedout |
|
781 | 781 | assert roots |
|
782 | 782 | assert heads |
|
783 | 783 | return (orderedout, roots, heads) |
|
784 | 784 | |
|
785 | 785 | def heads(self, start=None, stop=None): |
|
786 | 786 | """return the list of all nodes that have no children |
|
787 | 787 | |
|
788 | 788 | if start is specified, only heads that are descendants of |
|
789 | 789 | start will be returned |
|
790 | 790 | if stop is specified, it will consider all the revs from stop |
|
791 | 791 | as if they had no children |
|
792 | 792 | """ |
|
793 | 793 | if start is None and stop is None: |
|
794 | 794 | count = len(self) |
|
795 | 795 | if not count: |
|
796 | 796 | return [nullid] |
|
797 | 797 | ishead = [1] * (count + 1) |
|
798 | 798 | index = self.index |
|
799 | 799 | for r in xrange(count): |
|
800 | 800 | e = index[r] |
|
801 | 801 | ishead[e[5]] = ishead[e[6]] = 0 |
|
802 | 802 | return [self.node(r) for r in xrange(count) if ishead[r]] |
|
803 | 803 | |
|
804 | 804 | if start is None: |
|
805 | 805 | start = nullid |
|
806 | 806 | if stop is None: |
|
807 | 807 | stop = [] |
|
808 | 808 | stoprevs = set([self.rev(n) for n in stop]) |
|
809 | 809 | startrev = self.rev(start) |
|
810 | 810 | reachable = {startrev: 1} |
|
811 | 811 | heads = {startrev: 1} |
|
812 | 812 | |
|
813 | 813 | parentrevs = self.parentrevs |
|
814 | 814 | for r in xrange(startrev + 1, len(self)): |
|
815 | 815 | for p in parentrevs(r): |
|
816 | 816 | if p in reachable: |
|
817 | 817 | if r not in stoprevs: |
|
818 | 818 | reachable[r] = 1 |
|
819 | 819 | heads[r] = 1 |
|
820 | 820 | if p in heads and p not in stoprevs: |
|
821 | 821 | del heads[p] |
|
822 | 822 | |
|
823 | 823 | return [self.node(r) for r in heads] |
|
824 | 824 | |
|
825 | 825 | def children(self, node): |
|
826 | 826 | """find the children of a given node""" |
|
827 | 827 | c = [] |
|
828 | 828 | p = self.rev(node) |
|
829 | 829 | for r in range(p + 1, len(self)): |
|
830 | 830 | prevs = [pr for pr in self.parentrevs(r) if pr != nullrev] |
|
831 | 831 | if prevs: |
|
832 | 832 | for pr in prevs: |
|
833 | 833 | if pr == p: |
|
834 | 834 | c.append(self.node(r)) |
|
835 | 835 | elif p == nullrev: |
|
836 | 836 | c.append(self.node(r)) |
|
837 | 837 | return c |
|
838 | 838 | |
|
839 | 839 | def _match(self, id): |
|
840 | 840 | if isinstance(id, (long, int)): |
|
841 | 841 | # rev |
|
842 | 842 | return self.node(id) |
|
843 | 843 | if len(id) == 20: |
|
844 | 844 | # possibly a binary node |
|
845 | 845 | # odds of a binary node being all hex in ASCII are 1 in 10**25 |
|
846 | 846 | try: |
|
847 | 847 | node = id |
|
848 | 848 | self.rev(node) # quick search the index |
|
849 | 849 | return node |
|
850 | 850 | except LookupError: |
|
851 | 851 | pass # may be partial hex id |
|
852 | 852 | try: |
|
853 | 853 | # str(rev) |
|
854 | 854 | rev = int(id) |
|
855 | 855 | if str(rev) != id: |
|
856 | 856 | raise ValueError |
|
857 | 857 | if rev < 0: |
|
858 | 858 | rev = len(self) + rev |
|
859 | 859 | if rev < 0 or rev >= len(self): |
|
860 | 860 | raise ValueError |
|
861 | 861 | return self.node(rev) |
|
862 | 862 | except (ValueError, OverflowError): |
|
863 | 863 | pass |
|
864 | 864 | if len(id) == 40: |
|
865 | 865 | try: |
|
866 | 866 | # a full hex nodeid? |
|
867 | 867 | node = bin(id) |
|
868 | 868 | self.rev(node) |
|
869 | 869 | return node |
|
870 | 870 | except (TypeError, LookupError): |
|
871 | 871 | pass |
|
872 | 872 | |
|
873 | 873 | def _partialmatch(self, id): |
|
874 | 874 | if len(id) < 40: |
|
875 | 875 | try: |
|
876 | 876 | # hex(node)[:...] |
|
877 | 877 | l = len(id) / 2 # grab an even number of digits |
|
878 | 878 | bin_id = bin(id[:l*2]) |
|
879 | 879 | nl = [n for n in self.nodemap if n[:l] == bin_id] |
|
880 | 880 | nl = [n for n in nl if hex(n).startswith(id)] |
|
881 | 881 | if len(nl) > 0: |
|
882 | 882 | if len(nl) == 1: |
|
883 | 883 | return nl[0] |
|
884 | 884 | raise LookupError(id, self.indexfile, |
|
885 | 885 | _('ambiguous identifier')) |
|
886 | 886 | return None |
|
887 | 887 | except TypeError: |
|
888 | 888 | pass |
|
889 | 889 | |
|
890 | 890 | def lookup(self, id): |
|
891 | 891 | """locate a node based on: |
|
892 | 892 | - revision number or str(revision number) |
|
893 | 893 | - nodeid or subset of hex nodeid |
|
894 | 894 | """ |
|
895 | 895 | n = self._match(id) |
|
896 | 896 | if n is not None: |
|
897 | 897 | return n |
|
898 | 898 | n = self._partialmatch(id) |
|
899 | 899 | if n: |
|
900 | 900 | return n |
|
901 | 901 | |
|
902 | 902 | raise LookupError(id, self.indexfile, _('no match found')) |
|
903 | 903 | |
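lookup() therefore accepts an int rev, str(rev) (negative counts back from the tip), a 20-byte binary nodeid, a full 40-char hex id, or a hex prefix. A hedged sketch, assuming `rl` is a non-empty revlog, the prefix is unambiguous, and `hex` comes from mercurial.node:

    n = rl.lookup(0)                                  # revision number
    assert rl.lookup('-1') == rl.node(len(rl) - 1)    # str(rev), negative ok
    assert rl.lookup(hex(n)) == n                     # full hex nodeid
    assert rl.lookup(hex(n)[:8]) == n                 # short hex prefix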
|
904 | 904 | def cmp(self, node, text): |
|
905 | 905 | """compare text with a given file revision""" |
|
906 | 906 | p1, p2 = self.parents(node) |
|
907 | 907 | return hash(text, p1, p2) != node |
|
908 | 908 | |
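The hash() used here is, as far as I can tell, revlog's node hash: SHA-1 over the two parent ids in sorted order, then the text. Restated as a standalone sketch:

    import hashlib

    def nodehash(text, p1, p2):
        # sorting makes the hash symmetric in the parents
        a, b = sorted([p1, p2])
        s = hashlib.sha1(a)
        s.update(b)
        s.update(text)
        return s.digest()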
|
909 | 909 | def chunk(self, rev, df=None): |
|
910 | 910 | def loadcache(df): |
|
911 | 911 | if not df: |
|
912 | 912 | if self._inline: |
|
913 | 913 | df = self.opener(self.indexfile) |
|
914 | 914 | else: |
|
915 | 915 | df = self.opener(self.datafile) |
|
916 | 916 | df.seek(start) |
|
917 | 917 | self._chunkcache = (start, df.read(cache_length)) |
|
918 | 918 | |
|
919 | 919 | start, length = self.start(rev), self.length(rev) |
|
920 | 920 | if self._inline: |
|
921 | 921 | start += (rev + 1) * self._io.size |
|
922 | 922 | end = start + length |
|
923 | 923 | |
|
924 | 924 | offset = 0 |
|
925 | 925 | if not self._chunkcache: |
|
926 | 926 | cache_length = max(65536, length) |
|
927 | 927 | loadcache(df) |
|
928 | 928 | else: |
|
929 | 929 | cache_start = self._chunkcache[0] |
|
930 | 930 | cache_length = len(self._chunkcache[1]) |
|
931 | 931 | cache_end = cache_start + cache_length |
|
932 | 932 | if start >= cache_start and end <= cache_end: |
|
933 | 933 | # it is cached |
|
934 | 934 | offset = start - cache_start |
|
935 | 935 | else: |
|
936 | 936 | cache_length = max(65536, length) |
|
937 | 937 | loadcache(df) |
|
938 | 938 | |
|
939 | 939 | # avoid copying large chunks |
|
940 | 940 | c = self._chunkcache[1] |
|
941 | 941 | if cache_length != length: |
|
942 | 942 | c = c[offset:offset + length] |
|
943 | 943 | |
|
944 | 944 | return decompress(c) |
|
945 | 945 | |
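Two details worth calling out above: the cache reads at least 64 KiB (65536 bytes) per miss to amortize seeks, and in inline revlogs each revision's data is preceded on disk by the index entries for revs 0..rev. The offset adjustment, isolated as a sketch:

    def inline_data_start(datastart, rev, entrysize):
        # rev + 1 index entries (revs 0..rev) sit before rev's data
        return datastart + (rev + 1) * entrysize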
|
946 | 946 | def revdiff(self, rev1, rev2): |
|
947 | 947 | """return or calculate a delta between two revisions""" |
|
948 | 948 | if rev1 + 1 == rev2 and self.base(rev1) == self.base(rev2): |
|
949 | 949 | return self.chunk(rev2) |
|
950 | 950 | |
|
951 | 951 | return mdiff.textdiff(self.revision(self.node(rev1)), |
|
952 | 952 | self.revision(self.node(rev2))) |
|
953 | 953 | |
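A round-trip sketch for revdiff(), assuming `rl` is a revlog with at least two revisions and `mdiff` is mercurial's diff module:

    d = rl.revdiff(0, 1)
    patched = mdiff.patches(rl.revision(rl.node(0)), [d])
    assert patched == rl.revision(rl.node(1))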
|
954 | 954 | def revision(self, node): |
|
955 | 955 | """return an uncompressed revision of a given node""" |
|
956 | 956 | if node == nullid: |
|
957 | 957 | return "" |
|
958 | 958 | if self._cache and self._cache[0] == node: |
|
959 | 959 | return str(self._cache[2]) |
|
960 | 960 | |
|
961 | 961 | # look up what we need to read |
|
962 | 962 | text = None |
|
963 | 963 | rev = self.rev(node) |
|
964 | 964 | base = self.base(rev) |
|
965 | 965 | |
|
966 | 966 | # check rev flags |
|
967 | 967 | if self.index[rev][0] & 0xFFFF: |
|
968 | 968 | raise RevlogError(_('incompatible revision flag %x') % |
|
969 | 969 | (self.index[rev][0] & 0xFFFF)) |
|
970 | 970 | |
|
971 | 971 | df = None |
|
972 | 972 | |
|
973 | 973 | # do we have useful data cached? |
|
974 | 974 | if self._cache and self._cache[1] >= base and self._cache[1] < rev: |
|
975 | 975 | base = self._cache[1] |
|
976 | 976 | text = str(self._cache[2]) |
|
977 | 977 | self._loadindex(base, rev + 1) |
|
978 | 978 | if not self._inline and rev > base + 1: |
|
979 | 979 | df = self.opener(self.datafile) |
|
980 | 980 | else: |
|
981 | 981 | self._loadindex(base, rev + 1) |
|
982 | 982 | if not self._inline and rev > base: |
|
983 | 983 | df = self.opener(self.datafile) |
|
984 | 984 | text = self.chunk(base, df=df) |
|
985 | 985 | |
|
986 | 986 | bins = [self.chunk(r, df) for r in xrange(base + 1, rev + 1)] |
|
987 | 987 | text = mdiff.patches(text, bins) |
|
988 | 988 | p1, p2 = self.parents(node) |
|
989 | 989 | if node != hash(text, p1, p2): |
|
990 | 990 | raise RevlogError(_("integrity check failed on %s:%d") |
|
991 | 991 | % (self.datafile, rev)) |
|
992 | 992 | |
|
993 | 993 | self._cache = (node, rev, text) |
|
994 | 994 | return text |
|
995 | 995 | |
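The reconstruction above, with the caching and flag checks stripped out, as a hedged sketch (`rl` a revlog, `node` a nodeid it contains):

    rev = rl.rev(node)
    base = rl.base(rev)               # first revision of the delta chain
    text = rl.chunk(base)             # full text stored at the chain base
    bins = [rl.chunk(r) for r in xrange(base + 1, rev + 1)]
    text = mdiff.patches(text, bins)  # apply the whole chain in one pass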
|
996 | 996 | def checkinlinesize(self, tr, fp=None): |
|
997 | 997 | if not self._inline: |
|
998 | 998 | return |
|
999 | 999 | if not fp: |
|
1000 | 1000 | fp = self.opener(self.indexfile, 'r') |
|
1001 | 1001 | fp.seek(0, 2) |
|
1002 | 1002 | size = fp.tell() |
|
1003 | 1003 | if size < 131072: |
|
1004 | 1004 | return |
|
1005 | 1005 | trinfo = tr.find(self.indexfile) |
|
1006 | 1006 | if trinfo is None:
|
1007 | 1007 | raise RevlogError(_("%s not found in the transaction") |
|
1008 | 1008 | % self.indexfile) |
|
1009 | 1009 | |
|
1010 | 1010 | trindex = trinfo[2] |
|
1011 | 1011 | dataoff = self.start(trindex) |
|
1012 | 1012 | |
|
1013 | 1013 | tr.add(self.datafile, dataoff) |
|
1014 | 1014 | df = self.opener(self.datafile, 'w') |
|
1015 | 1015 | try: |
|
1016 | 1016 | calc = self._io.size |
|
1017 | 1017 | for r in self: |
|
1018 | 1018 | start = self.start(r) + (r + 1) * calc |
|
1019 | 1019 | length = self.length(r) |
|
1020 | 1020 | fp.seek(start) |
|
1021 | 1021 | d = fp.read(length) |
|
1022 | 1022 | df.write(d) |
|
1023 | 1023 | finally: |
|
1024 | 1024 | df.close() |
|
1025 | 1025 | |
|
1026 | 1026 | fp.close() |
|
1027 | 1027 | fp = self.opener(self.indexfile, 'w', atomictemp=True) |
|
1028 | 1028 | self.version &= ~(REVLOGNGINLINEDATA) |
|
1029 | 1029 | self._inline = False |
|
1030 | 1030 | for i in self: |
|
1031 | 1031 | e = self._io.packentry(self.index[i], self.node, self.version, i) |
|
1032 | 1032 | fp.write(e) |
|
1033 | 1033 | |
|
1034 | 1034 | # if we don't call rename, the temp file will never replace the |
|
1035 | 1035 | # real index |
|
1036 | 1036 | fp.rename() |
|
1037 | 1037 | |
|
1038 | 1038 | tr.replace(self.indexfile, trindex * calc) |
|
1039 | 1039 | self._chunkcache = None |
|
1040 | 1040 | |
|
1041 | 1041 | def addrevision(self, text, transaction, link, p1, p2, d=None): |
|
1042 | 1042 | """add a revision to the log |
|
1043 | 1043 | |
|
1044 | 1044 | text - the revision data to add |
|
1045 | 1045 | transaction - the transaction object used for rollback |
|
1046 | 1046 | link - the linkrev data to add |
|
1047 | 1047 | p1, p2 - the parent nodeids of the revision |
|
1048 | 1048 | d - an optional precomputed delta |
|
1049 | 1049 | """ |
|
1050 | 1050 | dfh = None |
|
1051 | 1051 | if not self._inline: |
|
1052 | 1052 | dfh = self.opener(self.datafile, "a") |
|
1053 | 1053 | ifh = self.opener(self.indexfile, "a+") |
|
1054 | 1054 | try: |
|
1055 | 1055 | return self._addrevision(text, transaction, link, p1, p2, d, ifh, dfh) |
|
1056 | 1056 | finally: |
|
1057 | 1057 | if dfh: |
|
1058 | 1058 | dfh.close() |
|
1059 | 1059 | ifh.close() |
|
1060 | 1060 | |
|
1061 | 1061 | def _addrevision(self, text, transaction, link, p1, p2, d, ifh, dfh): |
|
1062 | 1062 | node = hash(text, p1, p2) |
|
1063 | 1063 | if node in self.nodemap: |
|
1064 | 1064 | return node |
|
1065 | 1065 | |
|
1066 | 1066 | curr = len(self) |
|
1067 | 1067 | prev = curr - 1 |
|
1068 | 1068 | base = self.base(prev) |
|
1069 | 1069 | offset = self.end(prev) |
|
1070 | 1070 | |
|
1071 | 1071 | if curr: |
|
1072 | 1072 | if not d: |
|
1073 | 1073 | ptext = self.revision(self.node(prev)) |
|
1074 | 1074 | d = mdiff.textdiff(ptext, text) |
|
1075 | 1075 | data = compress(d) |
|
1076 | 1076 | l = len(data[1]) + len(data[0]) |
|
1077 | 1077 | dist = l + offset - self.start(base) |
|
1078 | 1078 | |
|
1079 | 1079 | # full versions are inserted when the needed deltas |
|
1080 | 1080 | # become comparable to the uncompressed text |
|
1081 | 1081 | if not curr or dist > len(text) * 2: |
|
1082 | 1082 | data = compress(text) |
|
1083 | 1083 | l = len(data[1]) + len(data[0]) |
|
1084 | 1084 | base = curr |
|
1085 | 1085 | |
|
1086 | 1086 | e = (offset_type(offset, 0), l, len(text), |
|
1087 | 1087 | base, link, self.rev(p1), self.rev(p2), node) |
|
1088 | 1088 | self.index.insert(-1, e) |
|
1089 | 1089 | self.nodemap[node] = curr |
|
1090 | 1090 | |
|
1091 | 1091 | entry = self._io.packentry(e, self.node, self.version, curr) |
|
1092 | 1092 | if not self._inline: |
|
1093 | 1093 | transaction.add(self.datafile, offset) |
|
1094 | 1094 | transaction.add(self.indexfile, curr * len(entry)) |
|
1095 | 1095 | if data[0]: |
|
1096 | 1096 | dfh.write(data[0]) |
|
1097 | 1097 | dfh.write(data[1]) |
|
1098 | 1098 | dfh.flush() |
|
1099 | 1099 | ifh.write(entry) |
|
1100 | 1100 | else: |
|
1101 | 1101 | offset += curr * self._io.size |
|
1102 | 1102 | transaction.add(self.indexfile, offset, curr) |
|
1103 | 1103 | ifh.write(entry) |
|
1104 | 1104 | ifh.write(data[0]) |
|
1105 | 1105 | ifh.write(data[1]) |
|
1106 | 1106 | self.checkinlinesize(transaction, ifh) |
|
1107 | 1107 | |
|
1108 | 1108 | self._cache = (node, curr, text) |
|
1109 | 1109 | return node |
|
1110 | 1110 | |
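The snapshot heuristic above, isolated: a full version is stored for the first revision, or whenever the bytes spanned by the delta chain would exceed twice the new text's length, which bounds reconstruction cost. A sketch:

    def storefulltext(curr, dist, textlen):
        # curr == 0: first revision; dist: chain span ending at this rev
        return not curr or dist > textlen * 2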
|
1111 | 1111 | def ancestor(self, a, b): |
|
1112 | 1112 | """calculate the least common ancestor of nodes a and b""" |
|
1113 | 1113 | |
|
1114 | 1114 | def parents(rev): |
|
1115 | 1115 | return [p for p in self.parentrevs(rev) if p != nullrev] |
|
1116 | 1116 | |
|
1117 | 1117 | c = ancestor.ancestor(self.rev(a), self.rev(b), parents) |
|
1118 | 1118 | if c is None: |
|
1119 | 1119 | return nullid |
|
1120 | 1120 | |
|
1121 | 1121 | return self.node(c) |
|
1122 | 1122 | |
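Usage sketch, with `rl` an assumed revlog and `n1`, `n2` nodeids it contains:

    base = rl.ancestor(n1, n2)        # nullid if they share no history
    if base != nullid:
        basetext = rl.revision(base)  # e.g. the merge base's content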
|
1123 | 1123 | def group(self, nodelist, lookup, infocollect=None): |
|
1124 | 1124 | """calculate a delta group |
|
1125 | 1125 | |
|
1126 | 1126 | Given a list of changeset revs, return a set of deltas and |
|
1127 | 1127 | metadata corresponding to nodes. The first delta is

1128 | 1128 | parent(nodes[0]) -> nodes[0]; the receiver is guaranteed to

1129 | 1129 | have this parent, as it has all history before these

1130 | 1130 | changesets. The parent used is the first parent, parents()[0].
|
1131 | 1131 | """ |
|
1132 | 1132 | revs = [self.rev(n) for n in nodelist] |
|
1133 | 1133 | |
|
1134 | 1134 | # if we don't have any revisions touched by these changesets, bail |
|
1135 | 1135 | if not revs: |
|
1136 | 1136 | yield changegroup.closechunk() |
|
1137 | 1137 | return |
|
1138 | 1138 | |
|
1139 | 1139 | # add the parent of the first rev |
|
1140 | 1140 | p = self.parents(self.node(revs[0]))[0] |
|
1141 | 1141 | revs.insert(0, self.rev(p)) |
|
1142 | 1142 | |
|
1143 | 1143 | # build deltas |
|
1144 | 1144 | for d in xrange(0, len(revs) - 1): |
|
1145 | 1145 | a, b = revs[d], revs[d + 1] |
|
1146 | 1146 | nb = self.node(b) |
|
1147 | 1147 | |
|
1148 | 1148 | if infocollect is not None: |
|
1149 | 1149 | infocollect(nb) |
|
1150 | 1150 | |
|
1151 | 1151 | p = self.parents(nb) |
|
1152 | 1152 | meta = nb + p[0] + p[1] + lookup(nb) |
|
1153 | 1153 | if a == -1: |
|
1154 | 1154 | d = self.revision(nb) |
|
1155 | 1155 | meta += mdiff.trivialdiffheader(len(d)) |
|
1156 | 1156 | else: |
|
1157 | 1157 | d = self.revdiff(a, b) |
|
1158 | 1158 | yield changegroup.chunkheader(len(meta) + len(d)) |
|
1159 | 1159 | yield meta |
|
1160 | 1160 | if len(d) > 2**20: |
|
1161 | 1161 | pos = 0 |
|
1162 | 1162 | while pos < len(d): |
|
1163 | 1163 | pos2 = pos + 2 ** 18 |
|
1164 | 1164 | yield d[pos:pos2] |
|
1165 | 1165 | pos = pos2 |
|
1166 | 1166 | else: |
|
1167 | 1167 | yield d |
|
1168 | 1168 | |
|
1169 | 1169 | yield changegroup.closechunk() |
|
1170 | 1170 | |
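For reference, the changegroup framing emitted here: each chunk is a 4-byte big-endian length (which includes the 4 header bytes themselves) followed by the payload, and a zero-length chunk terminates the group. The helpers, which I believe mirror mercurial.changegroup:

    import struct

    def chunkheader(length):    # header for a payload of 'length' bytes
        return struct.pack(">l", length + 4)

    def closechunk():           # zero length ends the group
        return struct.pack(">l", 0)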
|
1171 | 1171 | def addgroup(self, revs, linkmapper, transaction): |
|
1172 | 1172 | """ |
|
1173 | 1173 | add a delta group |
|
1174 | 1174 | |
|
1175 | 1175 | given a set of deltas, add them to the revision log. the |
|
1176 | 1176 | first delta is against its parent, which should be in our |
|
1177 | 1177 | log, the rest are against the previous delta. |
|
1178 | 1178 | """ |
|
1179 | 1179 | |
|
1180 | 1180 | # track the base of the current delta log
|
1181 | 1181 | r = len(self) |
|
1182 | 1182 | t = r - 1 |
|
1183 | 1183 | node = None |
|
1184 | 1184 | |
|
1185 | 1185 | base = prev = nullrev |
|
1186 | 1186 | start = end = textlen = 0 |
|
1187 | 1187 | if r: |
|
1188 | 1188 | end = self.end(t) |
|
1189 | 1189 | |
|
1190 | 1190 | ifh = self.opener(self.indexfile, "a+") |
|
1191 | 1191 | isize = r * self._io.size |
|
1192 | 1192 | if self._inline: |
|
1193 | 1193 | transaction.add(self.indexfile, end + isize, r) |
|
1194 | 1194 | dfh = None |
|
1195 | 1195 | else: |
|
1196 | 1196 | transaction.add(self.indexfile, isize, r) |
|
1197 | 1197 | transaction.add(self.datafile, end) |
|
1198 | 1198 | dfh = self.opener(self.datafile, "a") |
|
1199 | 1199 | |
|
1200 | 1200 | try: |
|
1201 | 1201 | # loop through our set of deltas |
|
1202 | 1202 | chain = None |
|
1203 | 1203 | for chunk in revs: |
|
1204 | 1204 | node, p1, p2, cs = struct.unpack("20s20s20s20s", chunk[:80]) |
|
1205 | 1205 | link = linkmapper(cs) |
|
1206 | 1206 | if node in self.nodemap: |
|
1207 | 1207 | # this can happen if two branches make the same change |
|
1208 | 1208 | chain = node |
|
1209 | 1209 | continue |
|
1210 | 1210 | delta = buffer(chunk, 80) |
|
1211 | 1211 | del chunk |
|
1212 | 1212 | |
|
1213 | 1213 | for p in (p1, p2): |
|
1214 | 1214 | if p not in self.nodemap:
|
1215 | 1215 | raise LookupError(p, self.indexfile, _('unknown parent')) |
|
1216 | 1216 | |
|
1217 | 1217 | if not chain: |
|
1218 | 1218 | # retrieve the parent revision of the delta chain |
|
1219 | 1219 | chain = p1 |
|
1220 | 1220 | if chain not in self.nodemap:
|
1221 | 1221 | raise LookupError(chain, self.indexfile, _('unknown base')) |
|
1222 | 1222 | |
|
1223 | 1223 | # full versions are inserted when the needed deltas become |
|
1224 | 1224 | # comparable to the uncompressed text or when the previous |
|
1225 | 1225 | # version is not the one we have a delta against. We use |
|
1226 | 1226 | # the size of the previous full rev as a proxy for the |
|
1227 | 1227 | # current size. |
|
1228 | 1228 | |
|
1229 | 1229 | if chain == prev: |
|
1230 | 1230 | cdelta = compress(delta) |
|
1231 | 1231 | cdeltalen = len(cdelta[0]) + len(cdelta[1]) |
|
1232 | 1232 | textlen = mdiff.patchedsize(textlen, delta) |
|
1233 | 1233 | |
|
1234 | 1234 | if chain != prev or (end - start + cdeltalen) > textlen * 2: |
|
1235 | 1235 | # flush our writes here so we can read it in revision |
|
1236 | 1236 | if dfh: |
|
1237 | 1237 | dfh.flush() |
|
1238 | 1238 | ifh.flush() |
|
1239 | 1239 | text = self.revision(chain) |
|
1240 | 1240 | if len(text) == 0: |
|
1241 | 1241 | # skip over trivial delta header |
|
1242 | 1242 | text = buffer(delta, 12) |
|
1243 | 1243 | else: |
|
1244 | 1244 | text = mdiff.patches(text, [delta]) |
|
1245 | 1245 | del delta |
|
1246 | 1246 | chk = self._addrevision(text, transaction, link, p1, p2, None, |
|
1247 | 1247 | ifh, dfh) |
|
1248 | 1248 | if not dfh and not self._inline: |
|
1249 | 1249 | # addrevision switched from inline to conventional |
|
1250 | 1250 | # reopen the index |
|
1251 | 1251 | dfh = self.opener(self.datafile, "a") |
|
1252 | 1252 | ifh = self.opener(self.indexfile, "a") |
|
1253 | 1253 | if chk != node: |
|
1254 | 1254 | raise RevlogError(_("consistency error adding group")) |
|
1255 | 1255 | textlen = len(text) |
|
1256 | 1256 | else: |
|
1257 | 1257 | e = (offset_type(end, 0), cdeltalen, textlen, base, |
|
1258 | 1258 | link, self.rev(p1), self.rev(p2), node) |
|
1259 | 1259 | self.index.insert(-1, e) |
|
1260 | 1260 | self.nodemap[node] = r |
|
1261 | 1261 | entry = self._io.packentry(e, self.node, self.version, r) |
|
1262 | 1262 | if self._inline: |
|
1263 | 1263 | ifh.write(entry) |
|
1264 | 1264 | ifh.write(cdelta[0]) |
|
1265 | 1265 | ifh.write(cdelta[1]) |
|
1266 | 1266 | self.checkinlinesize(transaction, ifh) |
|
1267 | 1267 | if not self._inline: |
|
1268 | 1268 | dfh = self.opener(self.datafile, "a") |
|
1269 | 1269 | ifh = self.opener(self.indexfile, "a") |
|
1270 | 1270 | else: |
|
1271 | 1271 | dfh.write(cdelta[0]) |
|
1272 | 1272 | dfh.write(cdelta[1]) |
|
1273 | 1273 | ifh.write(entry) |
|
1274 | 1274 | |
|
1275 | 1275 | t, r, chain, prev = r, r + 1, node, node |
|
1276 | 1276 | base = self.base(t) |
|
1277 | 1277 | start = self.start(base) |
|
1278 | 1278 | end = self.end(t) |
|
1279 | 1279 | finally: |
|
1280 | 1280 | if dfh: |
|
1281 | 1281 | dfh.close() |
|
1282 | 1282 | ifh.close() |
|
1283 | 1283 | |
|
1284 | 1284 | return node |
|
1285 | 1285 | |
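The per-chunk layout consumed above, factored into a sketch: four 20-byte binary nodeids (node, p1, p2, and the changeset this revision is linked to), then the delta payload:

    import struct

    def parsechunk(chunk):
        node, p1, p2, cs = struct.unpack("20s20s20s20s", chunk[:80])
        return node, p1, p2, cs, buffer(chunk, 80)  # payload is the delta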
|
1286 | 1286 | def strip(self, minlink, transaction): |
|
1287 | 1287 | """truncate the revlog on the first revision with a linkrev >= minlink |
|
1288 | 1288 | |
|
1289 | 1289 | This function is called when we're stripping revision minlink and |
|
1290 | 1290 | its descendants from the repository. |
|
1291 | 1291 | |
|
1292 | 1292 | We have to remove all revisions with linkrev >= minlink, because |
|
1293 | 1293 | the equivalent changelog revisions will be renumbered after the |
|
1294 | 1294 | strip. |
|
1295 | 1295 | |
|
1296 | 1296 | So we truncate the revlog on the first of these revisions, and |
|
1297 | 1297 | trust that the caller has saved the revisions that shouldn't be |
|
1298 | 1298 | removed and that it'll re-add them after this truncation.
|
1299 | 1299 | """ |
|
1300 | 1300 | if len(self) == 0: |
|
1301 | 1301 | return |
|
1302 | 1302 | |
|
1303 | 1303 | if isinstance(self.index, lazyindex): |
|
1304 | 1304 | self._loadindexmap() |
|
1305 | 1305 | |
|
1306 | 1306 | for rev in self: |
|
1307 | 1307 | if self.index[rev][4] >= minlink: |
|
1308 | 1308 | break |
|
1309 | 1309 | else: |
|
1310 | 1310 | return |
|
1311 | 1311 | |
|
1312 | 1312 | # first truncate the files on disk |
|
1313 | 1313 | end = self.start(rev) |
|
1314 | 1314 | if not self._inline: |
|
1315 | 1315 | transaction.add(self.datafile, end) |
|
1316 | 1316 | end = rev * self._io.size |
|
1317 | 1317 | else: |
|
1318 | 1318 | end += rev * self._io.size |
|
1319 | 1319 | |
|
1320 | 1320 | transaction.add(self.indexfile, end) |
|
1321 | 1321 | |
|
1322 | 1322 | # then reset internal state in memory to forget those revisions |
|
1323 | 1323 | self._cache = None |
|
1324 | 1324 | self._chunkcache = None |
|
1325 | 1325 | for x in xrange(rev, len(self)): |
|
1326 | 1326 | del self.nodemap[self.node(x)] |
|
1327 | 1327 | |
|
1328 | 1328 | del self.index[rev:-1] |
|
1329 | 1329 | |
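A hedged usage sketch; the in-tree caller (repair.strip) first saves the changegroup chunks it intends to re-add, which this sketch omits, and `repo`/`minlink` are assumed from context:

    tr = repo.transaction()
    rl.strip(minlink, tr)    # truncates the files and in-memory state
    tr.close()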
|
1330 | 1330 | def checksize(self): |
|
1331 | 1331 | expected = 0 |
|
1332 | 1332 | if len(self): |
|
1333 | 1333 | expected = max(0, self.end(len(self) - 1)) |
|
1334 | 1334 | |
|
1335 | 1335 | try: |
|
1336 | 1336 | f = self.opener(self.datafile) |
|
1337 | 1337 | f.seek(0, 2) |
|
1338 | 1338 | actual = f.tell() |
|
1339 | 1339 | dd = actual - expected |
|
1340 | 1340 | except IOError, inst: |
|
1341 | 1341 | if inst.errno != errno.ENOENT: |
|
1342 | 1342 | raise |
|
1343 | 1343 | dd = 0 |
|
1344 | 1344 | |
|
1345 | 1345 | try: |
|
1346 | 1346 | f = self.opener(self.indexfile) |
|
1347 | 1347 | f.seek(0, 2) |
|
1348 | 1348 | actual = f.tell() |
|
1349 | 1349 | s = self._io.size |
|
1350 | 1350 | i = max(0, actual / s) |
|
1351 | 1351 | di = actual - (i * s) |
|
1352 | 1352 | if self._inline: |
|
1353 | 1353 | databytes = 0 |
|
1354 | 1354 | for r in self: |
|
1355 | 1355 | databytes += max(0, self.length(r)) |
|
1356 | 1356 | dd = 0 |
|
1357 | 1357 | di = actual - len(self) * s - databytes |
|
1358 | 1358 | except IOError, inst: |
|
1359 | 1359 | if inst.errno != errno.ENOENT: |
|
1360 | 1360 | raise |
|
1361 | 1361 | di = 0 |
|
1362 | 1362 | |
|
1363 | 1363 | return (dd, di) |
|
1364 | 1364 | |
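checksize() is what lets hg verify flag truncated or over-long revlog files, if I read the callers right. A small sketch with `rl` an assumed revlog:

    dd, di = rl.checksize()
    if dd or di:
        # the .d or .i file length disagrees with what the index
        # entries predict, e.g. after an interrupted write
        print "data off by %d, index off by %d bytes" % (dd, di)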
|
1365 | 1365 | def files(self): |
|
1366 | 1366 | res = [self.indexfile]
|
1367 | 1367 | if not self._inline: |
|
1368 | 1368 | res.append(self.datafile) |
|
1369 | 1369 | return res |