@@ -1,406 +1,407 @@ hgext/imerge.py
|
1 | 1 | # Copyright (C) 2007 Brendan Cully <brendan@kublai.com> |
|
2 | 2 | # Published under the GNU GPL |
|
3 | 3 | |
|
4 | 4 | ''' |
|
5 | 5 | imerge - interactive merge |
|
6 | 6 | ''' |
|
7 | 7 | |
|
8 | 8 | from mercurial.i18n import _ |
|
9 | 9 | from mercurial.node import hex, short |
|
10 | 10 | from mercurial import commands, cmdutil, dispatch, fancyopts |
|
11 | 11 | from mercurial import hg, filemerge, util, revlog |
|
12 | 12 | import os, tarfile |
|
13 | 13 | |
|
14 | 14 | class InvalidStateFileException(Exception): pass |
|
15 | 15 | |
|
16 | 16 | class ImergeStateFile(object): |
|
17 | 17 | def __init__(self, im): |
|
18 | 18 | self.im = im |
|
19 | 19 | |
|
20 | 20 | def save(self, dest): |
|
21 | 21 | tf = tarfile.open(dest, 'w:gz') |
|
22 | 22 | |
|
23 | 23 | st = os.path.join(self.im.path, 'status') |
|
24 | 24 | tf.add(st, os.path.join('.hg', 'imerge', 'status')) |
|
25 | 25 | |
|
26 | 26 | for f in self.im.resolved: |
|
27 | 27 | (fd, fo) = self.im.conflicts[f] |
|
28 | 28 | abssrc = self.im.repo.wjoin(fd) |
|
29 | 29 | tf.add(abssrc, fd) |
|
30 | 30 | |
|
31 | 31 | tf.close() |
|
32 | 32 | |
|
33 | 33 | def load(self, source): |
|
34 | 34 | wlock = self.im.repo.wlock() |
|
35 | 35 | lock = self.im.repo.lock() |
|
36 | 36 | |
|
37 | 37 | tf = tarfile.open(source, 'r') |
|
38 | 38 | contents = tf.getnames() |
|
39 | 39 | # tarfile normalizes path separators to '/' |
|
40 | 40 | statusfile = '.hg/imerge/status' |
|
41 | 41 | if statusfile not in contents: |
|
42 | 42 | raise InvalidStateFileException('no status file') |
|
43 | 43 | |
|
44 | 44 | tf.extract(statusfile, self.im.repo.root) |
|
45 | 45 | p1, p2 = self.im.load() |
|
46 | 46 | if self.im.repo.dirstate.parents()[0] != p1.node(): |
|
47 | 47 | hg.clean(self.im.repo, p1.node()) |
|
48 | 48 | self.im.start(p2.node()) |
|
49 | 49 | for tarinfo in tf: |
|
50 | 50 | tf.extract(tarinfo, self.im.repo.root) |
|
51 | 51 | self.im.load() |
|
52 | 52 | |
|
53 | 53 | class Imerge(object): |
|
54 | 54 | def __init__(self, ui, repo): |
|
55 | 55 | self.ui = ui |
|
56 | 56 | self.repo = repo |
|
57 | 57 | |
|
58 | 58 | self.path = repo.join('imerge') |
|
59 | 59 | self.opener = util.opener(self.path) |
|
60 | 60 | |
|
61 | 61 | self.wctx = self.repo.workingctx() |
|
62 | 62 | self.conflicts = {} |
|
63 | 63 | self.resolved = [] |
|
64 | 64 | |
|
65 | 65 | def merging(self): |
|
66 | 66 | return len(self.wctx.parents()) > 1 |
|
67 | 67 | |
|
68 | 68 | def load(self): |
|
69 | 69 | # status format. \0-delimited file, fields are |
|
70 | 70 | # p1, p2, conflict count, conflict filenames, resolved filenames |
|
71 | 71 | # conflict filenames are tuples of localname, remoteorig, remotenew |
|
72 | 72 | |
|
73 | 73 | statusfile = self.opener('status') |
|
74 | 74 | |
|
75 | 75 | status = statusfile.read().split('\0') |
|
76 | 76 | if len(status) < 3: |
|
77 | 77 | raise util.Abort('invalid imerge status file') |
|
78 | 78 | |
|
79 | 79 | try: |
|
80 | 80 | parents = [self.repo.changectx(n) for n in status[:2]] |
|
81 | 81 | except revlog.LookupError, e: |
|
82 | raise util.Abort('merge parent %s not in repository' % |
|
82 | raise util.Abort(_('merge parent %s not in repository') % | |
|
83 | short(e.name)) | |
|
83 | 84 | |
|
84 | 85 | status = status[2:] |
|
85 | 86 | conflicts = int(status.pop(0)) * 3 |
|
86 | 87 | self.resolved = status[conflicts:] |
|
87 | 88 | for i in xrange(0, conflicts, 3): |
|
88 | 89 | self.conflicts[status[i]] = (status[i+1], status[i+2]) |
|
89 | 90 | |
|
90 | 91 | return parents |
|
91 | 92 | |
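A reading aid, not part of the patch: a standalone sketch of the \0-delimited status format that Imerge.load() above parses and save() below writes (filenames invented):

    # fields: p1, p2, conflict count, then one (local, remoteorig,
    # remotenew) triple per conflict, then resolved filenames
    p1, p2 = '0' * 40, 'f' * 40          # hex changeset ids
    conflicts = {'a.txt': ('a.txt.orig', 'a.txt.new')}
    resolved = []

    out = [p1, p2, str(len(conflicts))]
    for fw, fd_fo in sorted(conflicts.items()):
        out.append(fw)
        out.extend(fd_fo)
    out.extend(resolved)
    data = '\0'.join(out)

    # parsing mirrors load()
    fields = data.split('\0')
    n = int(fields[2]) * 3
    assert fields[3:3 + n] == ['a.txt', 'a.txt.orig', 'a.txt.new']
    assert fields[3 + n:] == resolved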
|
92 | 93 | def save(self): |
|
93 | 94 | lock = self.repo.lock() |
|
94 | 95 | |
|
95 | 96 | if not os.path.isdir(self.path): |
|
96 | 97 | os.mkdir(self.path) |
|
97 | 98 | statusfile = self.opener('status', 'wb') |
|
98 | 99 | |
|
99 | 100 | out = [hex(n.node()) for n in self.wctx.parents()] |
|
100 | 101 | out.append(str(len(self.conflicts))) |
|
101 | 102 | conflicts = self.conflicts.items() |
|
102 | 103 | conflicts.sort() |
|
103 | 104 | for fw, fd_fo in conflicts: |
|
104 | 105 | out.append(fw) |
|
105 | 106 | out.extend(fd_fo) |
|
106 | 107 | out.extend(self.resolved) |
|
107 | 108 | |
|
108 | 109 | statusfile.write('\0'.join(out)) |
|
109 | 110 | |
|
110 | 111 | def remaining(self): |
|
111 | 112 | return [f for f in self.conflicts if f not in self.resolved] |
|
112 | 113 | |
|
113 | 114 | def filemerge(self, fn, interactive=True): |
|
114 | 115 | wlock = self.repo.wlock() |
|
115 | 116 | |
|
116 | 117 | (fd, fo) = self.conflicts[fn] |
|
117 | 118 | p1, p2 = self.wctx.parents() |
|
118 | 119 | |
|
119 | 120 | # this could be greatly improved |
|
120 | 121 | realmerge = os.environ.get('HGMERGE') |
|
121 | 122 | if not interactive: |
|
122 | 123 | os.environ['HGMERGE'] = 'merge' |
|
123 | 124 | |
|
124 | 125 | # The filemerge ancestor algorithm does not work if self.wctx |
|
125 | 126 | # already has two parents (in normal merge it doesn't yet). But |
|
126 | 127 | # this is very dirty. |
|
127 | 128 | self.wctx._parents.pop() |
|
128 | 129 | try: |
|
129 | 130 | # TODO: we should probably revert the file if merge fails |
|
130 | 131 | return filemerge.filemerge(self.repo, fn, fd, fo, self.wctx, p2) |
|
131 | 132 | finally: |
|
132 | 133 | self.wctx._parents.append(p2) |
|
133 | 134 | if realmerge: |
|
134 | 135 | os.environ['HGMERGE'] = realmerge |
|
135 | 136 | elif not interactive: |
|
136 | 137 | del os.environ['HGMERGE'] |
|
137 | 138 | |
|
138 | 139 | def start(self, rev=None): |
|
139 | 140 | _filemerge = filemerge.filemerge |
|
140 | 141 | def filemerge_(repo, fw, fd, fo, wctx, mctx): |
|
141 | 142 | self.conflicts[fw] = (fd, fo) |
|
142 | 143 | |
|
143 | 144 | filemerge.filemerge = filemerge_ |
|
144 | 145 | commands.merge(self.ui, self.repo, rev=rev) |
|
145 | 146 | filemerge.filemerge = _filemerge |
|
146 | 147 | |
|
147 | 148 | self.wctx = self.repo.workingctx() |
|
148 | 149 | self.save() |
|
149 | 150 | |
|
150 | 151 | def resume(self): |
|
151 | 152 | self.load() |
|
152 | 153 | |
|
153 | 154 | dp = self.repo.dirstate.parents() |
|
154 | 155 | p1, p2 = self.wctx.parents() |
|
155 | 156 | if p1.node() != dp[0] or p2.node() != dp[1]: |
|
156 | 157 | raise util.Abort('imerge state does not match working directory') |
|
157 | 158 | |
|
158 | 159 | def next(self): |
|
159 | 160 | remaining = self.remaining() |
|
160 | 161 | return remaining and remaining[0] |
|
161 | 162 | |
|
162 | 163 | def resolve(self, files): |
|
163 | 164 | resolved = dict.fromkeys(self.resolved) |
|
164 | 165 | for fn in files: |
|
165 | 166 | if fn not in self.conflicts: |
|
166 | 167 | raise util.Abort('%s is not in the merge set' % fn) |
|
167 | 168 | resolved[fn] = True |
|
168 | 169 | self.resolved = resolved.keys() |
|
169 | 170 | self.resolved.sort() |
|
170 | 171 | self.save() |
|
171 | 172 | return 0 |
|
172 | 173 | |
|
173 | 174 | def unresolve(self, files): |
|
174 | 175 | resolved = dict.fromkeys(self.resolved) |
|
175 | 176 | for fn in files: |
|
176 | 177 | if fn not in resolved: |
|
177 | 178 | raise util.Abort('%s is not resolved' % fn) |
|
178 | 179 | del resolved[fn] |
|
179 | 180 | self.resolved = resolved.keys() |
|
180 | 181 | self.resolved.sort() |
|
181 | 182 | self.save() |
|
182 | 183 | return 0 |
|
183 | 184 | |
|
184 | 185 | def pickle(self, dest): |
|
185 | 186 | '''write current merge state to file to be resumed elsewhere''' |
|
186 | 187 | state = ImergeStateFile(self) |
|
187 | 188 | return state.save(dest) |
|
188 | 189 | |
|
189 | 190 | def unpickle(self, source): |
|
190 | 191 | '''read merge state from file''' |
|
191 | 192 | state = ImergeStateFile(self) |
|
192 | 193 | return state.load(source) |
|
193 | 194 | |
|
194 | 195 | def load(im, source): |
|
195 | 196 | if im.merging(): |
|
196 | 197 | raise util.Abort('there is already a merge in progress ' |
|
197 | 198 | '(update -C <rev> to abort it)' ) |
|
198 | 199 | m, a, r, d = im.repo.status()[:4] |
|
199 | 200 | if m or a or r or d: |
|
200 | 201 | raise util.Abort('working directory has uncommitted changes') |
|
201 | 202 | |
|
202 | 203 | rc = im.unpickle(source) |
|
203 | 204 | if not rc: |
|
204 | 205 | status(im) |
|
205 | 206 | return rc |
|
206 | 207 | |
|
207 | 208 | def merge_(im, filename=None, auto=False): |
|
208 | 209 | success = True |
|
209 | 210 | if auto and not filename: |
|
210 | 211 | for fn in im.remaining(): |
|
211 | 212 | rc = im.filemerge(fn, interactive=False) |
|
212 | 213 | if rc: |
|
213 | 214 | success = False |
|
214 | 215 | else: |
|
215 | 216 | im.resolve([fn]) |
|
216 | 217 | if success: |
|
217 | 218 | im.ui.write('all conflicts resolved\n') |
|
218 | 219 | else: |
|
219 | 220 | status(im) |
|
220 | 221 | return 0 |
|
221 | 222 | |
|
222 | 223 | if not filename: |
|
223 | 224 | filename = im.next() |
|
224 | 225 | if not filename: |
|
225 | 226 | im.ui.write('all conflicts resolved\n') |
|
226 | 227 | return 0 |
|
227 | 228 | |
|
228 | 229 | rc = im.filemerge(filename, interactive=not auto) |
|
229 | 230 | if not rc: |
|
230 | 231 | im.resolve([filename]) |
|
231 | 232 | if not im.next(): |
|
232 | 233 | im.ui.write('all conflicts resolved\n') |
|
233 | 234 | return rc |
|
234 | 235 | |
|
235 | 236 | def next(im): |
|
236 | 237 | n = im.next() |
|
237 | 238 | if n: |
|
238 | 239 | im.ui.write('%s\n' % n) |
|
239 | 240 | else: |
|
240 | 241 | im.ui.write('all conflicts resolved\n') |
|
241 | 242 | return 0 |
|
242 | 243 | |
|
243 | 244 | def resolve(im, *files): |
|
244 | 245 | if not files: |
|
245 | 246 | raise util.Abort('resolve requires at least one filename') |
|
246 | 247 | return im.resolve(files) |
|
247 | 248 | |
|
248 | 249 | def save(im, dest): |
|
249 | 250 | return im.pickle(dest) |
|
250 | 251 | |
|
251 | 252 | def status(im, **opts): |
|
252 | 253 | if not opts.get('resolved') and not opts.get('unresolved'): |
|
253 | 254 | opts['resolved'] = True |
|
254 | 255 | opts['unresolved'] = True |
|
255 | 256 | |
|
256 | 257 | if im.ui.verbose: |
|
257 | 258 | p1, p2 = [short(p.node()) for p in im.wctx.parents()] |
|
258 | 259 | im.ui.note(_('merging %s and %s\n') % (p1, p2)) |
|
259 | 260 | |
|
260 | 261 | conflicts = im.conflicts.keys() |
|
261 | 262 | conflicts.sort() |
|
262 | 263 | remaining = dict.fromkeys(im.remaining()) |
|
263 | 264 | st = [] |
|
264 | 265 | for fn in conflicts: |
|
265 | 266 | if opts.get('no_status'): |
|
266 | 267 | mode = '' |
|
267 | 268 | elif fn in remaining: |
|
268 | 269 | mode = 'U ' |
|
269 | 270 | else: |
|
270 | 271 | mode = 'R ' |
|
271 | 272 | if ((opts.get('resolved') and fn not in remaining) |
|
272 | 273 | or (opts.get('unresolved') and fn in remaining)): |
|
273 | 274 | st.append((mode, fn)) |
|
274 | 275 | st.sort() |
|
275 | 276 | for (mode, fn) in st: |
|
276 | 277 | if im.ui.verbose: |
|
277 | 278 | fo, fd = im.conflicts[fn] |
|
278 | 279 | if fd != fn: |
|
279 | 280 | fn = '%s (%s)' % (fn, fd) |
|
280 | 281 | im.ui.write('%s%s\n' % (mode, fn)) |
|
281 | 282 | if opts.get('unresolved') and not remaining: |
|
282 | 283 | im.ui.write(_('all conflicts resolved\n')) |
|
283 | 284 | |
|
284 | 285 | return 0 |
|
285 | 286 | |
|
286 | 287 | def unresolve(im, *files): |
|
287 | 288 | if not files: |
|
288 | 289 | raise util.Abort('unresolve requires at least one filename') |
|
289 | 290 | return im.unresolve(files) |
|
290 | 291 | |
|
291 | 292 | subcmdtable = { |
|
292 | 293 | 'load': (load, []), |
|
293 | 294 | 'merge': |
|
294 | 295 | (merge_, |
|
295 | 296 | [('a', 'auto', None, _('automatically resolve if possible'))]), |
|
296 | 297 | 'next': (next, []), |
|
297 | 298 | 'resolve': (resolve, []), |
|
298 | 299 | 'save': (save, []), |
|
299 | 300 | 'status': |
|
300 | 301 | (status, |
|
301 | 302 | [('n', 'no-status', None, _('hide status prefix')), |
|
302 | 303 | ('', 'resolved', None, _('only show resolved conflicts')), |
|
303 | 304 | ('', 'unresolved', None, _('only show unresolved conflicts'))]), |
|
304 | 305 | 'unresolve': (unresolve, []) |
|
305 | 306 | } |
|
306 | 307 | |
|
307 | 308 | def dispatch_(im, args, opts): |
|
308 | 309 | def complete(s, choices): |
|
309 | 310 | candidates = [] |
|
310 | 311 | for choice in choices: |
|
311 | 312 | if choice.startswith(s): |
|
312 | 313 | candidates.append(choice) |
|
313 | 314 | return candidates |
|
314 | 315 | |
|
315 | 316 | c, args = args[0], list(args[1:]) |
|
316 | 317 | cmd = complete(c, subcmdtable.keys()) |
|
317 | 318 | if not cmd: |
|
318 | 319 | raise cmdutil.UnknownCommand('imerge ' + c) |
|
319 | 320 | if len(cmd) > 1: |
|
320 | 321 | cmd.sort() |
|
321 | 322 | raise cmdutil.AmbiguousCommand('imerge ' + c, cmd) |
|
322 | 323 | cmd = cmd[0] |
|
323 | 324 | |
|
324 | 325 | func, optlist = subcmdtable[cmd] |
|
325 | 326 | opts = {} |
|
326 | 327 | try: |
|
327 | 328 | args = fancyopts.fancyopts(args, optlist, opts) |
|
328 | 329 | return func(im, *args, **opts) |
|
329 | 330 | except fancyopts.getopt.GetoptError, inst: |
|
330 | 331 | raise dispatch.ParseError('imerge', '%s: %s' % (cmd, inst)) |
|
331 | 332 | except TypeError: |
|
332 | 333 | raise dispatch.ParseError('imerge', _('%s: invalid arguments') % cmd) |
|
333 | 334 | |
|
334 | 335 | def imerge(ui, repo, *args, **opts): |
|
335 | 336 | '''interactive merge |
|
336 | 337 | |
|
337 | 338 | imerge lets you split a merge into pieces. When you start a merge |
|
338 | 339 | with imerge, the names of all files with conflicts are recorded. |
|
339 | 340 | You can then merge any of these files, and if the merge is |
|
340 | 341 | successful, they will be marked as resolved. When all files are |
|
341 | 342 | resolved, the merge is complete. |
|
342 | 343 | |
|
343 | 344 | If no merge is in progress, hg imerge [rev] will merge the working |
|
344 | 345 | directory with rev (defaulting to the other head if the repository |
|
345 | 346 | only has two heads). You may also resume a saved merge with |
|
346 | 347 | hg imerge load <file>. |
|
347 | 348 | |
|
348 | 349 | If a merge is in progress, hg imerge will default to merging the |
|
349 | 350 | next unresolved file. |
|
350 | 351 | |
|
351 | 352 | The following subcommands are available: |
|
352 | 353 | |
|
353 | 354 | status: |
|
354 | 355 | show the current state of the merge |
|
355 | 356 | options: |
|
356 | 357 | -n --no-status: do not print the status prefix |
|
357 | 358 | --resolved: only print resolved conflicts |
|
358 | 359 | --unresolved: only print unresolved conflicts |
|
359 | 360 | next: |
|
360 | 361 | show the next unresolved file merge |
|
361 | 362 | merge [<file>]: |
|
362 | 363 | merge <file>. If the file merge is successful, the file will be |
|
363 | 364 | recorded as resolved. If no file is given, the next unresolved |
|
364 | 365 | file will be merged. |
|
365 | 366 | resolve <file>...: |
|
366 | 367 | mark files as successfully merged |
|
367 | 368 | unresolve <file>...: |
|
368 | 369 | mark files as requiring merging. |
|
369 | 370 | save <file>: |
|
370 | 371 | save the state of the merge to a file to be resumed elsewhere |
|
371 | 372 | load <file>: |
|
372 | 373 | load the state of the merge from a file created by save |
|
373 | 374 | ''' |
|
374 | 375 | |
|
375 | 376 | im = Imerge(ui, repo) |
|
376 | 377 | |
|
377 | 378 | if im.merging(): |
|
378 | 379 | im.resume() |
|
379 | 380 | else: |
|
380 | 381 | rev = opts.get('rev') |
|
381 | 382 | if rev and args: |
|
382 | 383 | raise util.Abort('please specify just one revision') |
|
383 | 384 | |
|
384 | 385 | if len(args) == 2 and args[0] == 'load': |
|
385 | 386 | pass |
|
386 | 387 | else: |
|
387 | 388 | if args: |
|
388 | 389 | rev = args[0] |
|
389 | 390 | im.start(rev=rev) |
|
390 | 391 | if opts.get('auto'): |
|
391 | 392 | args = ['merge', '--auto'] |
|
392 | 393 | else: |
|
393 | 394 | args = ['status'] |
|
394 | 395 | |
|
395 | 396 | if not args: |
|
396 | 397 | args = ['merge'] |
|
397 | 398 | |
|
398 | 399 | return dispatch_(im, args, opts) |
|
399 | 400 | |
|
400 | 401 | cmdtable = { |
|
401 | 402 | '^imerge': |
|
402 | 403 | (imerge, |
|
403 | 404 | [('r', 'rev', '', _('revision to merge')), |
|
404 | 405 | ('a', 'auto', None, _('automatically merge where possible'))], |
|
405 | 406 | 'hg imerge [command]') |
|
406 | 407 | } |
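The subcommand table above is dispatched by unambiguous prefix (see dispatch_); a minimal standalone sketch of that matching rule, outside the patch:

    def complete(s, choices):
        # every table entry that s is a prefix of
        return [c for c in choices if c.startswith(s)]

    table = ['load', 'merge', 'next', 'resolve', 'save', 'status', 'unresolve']
    assert complete('st', table) == ['status']         # unique: run it
    assert complete('s', table) == ['save', 'status']  # ambiguous: abort
    assert complete('x', table) == []                  # unknown command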
@@ -1,282 +1,283 @@ mercurial/bundlerepo.py
|
1 | 1 | """ |
|
2 | 2 | bundlerepo.py - repository class for viewing uncompressed bundles |
|
3 | 3 | |
|
4 | 4 | This provides a read-only repository interface to bundles as if |
|
5 | 5 | they were part of the actual repository. |
|
6 | 6 | |
|
7 | 7 | Copyright 2006, 2007 Benoit Boissinot <bboissin@gmail.com> |
|
8 | 8 | |
|
9 | 9 | This software may be used and distributed according to the terms |
|
10 | 10 | of the GNU General Public License, incorporated herein by reference. |
|
11 | 11 | """ |
|
12 | 12 | |
|
13 | 13 | from node import hex, nullid, short |
|
14 | 14 | from i18n import _ |
|
15 | 15 | import changegroup, util, os, struct, bz2, tempfile, mdiff |
|
16 | 16 | import localrepo, changelog, manifest, filelog, revlog |
|
17 | 17 | |
|
18 | 18 | class bundlerevlog(revlog.revlog): |
|
19 | 19 | def __init__(self, opener, indexfile, bundlefile, |
|
20 | 20 | linkmapper=None): |
|
21 | 21 | # How it works: |
|
22 | 22 | # to retrieve a revision, we need to know the offset of |
|
23 | 23 | # the revision in the bundlefile (an opened file). |
|
24 | 24 | # |
|
25 | 25 | # We store this offset in the index (start), to differentiate a |
|
26 | 26 | # rev in the bundle and from a rev in the revlog, we check |
|
27 | 27 | # len(index[r]). If the tuple is bigger than 7, it is a bundle |
|
28 | 28 | # (it is bigger since we store the node to which the delta is) |
|
29 | 29 | # |
|
30 | 30 | revlog.revlog.__init__(self, opener, indexfile) |
|
31 | 31 | self.bundlefile = bundlefile |
|
32 | 32 | self.basemap = {} |
|
33 | 33 | def chunkpositer(): |
|
34 | 34 | for chunk in changegroup.chunkiter(bundlefile): |
|
35 | 35 | pos = bundlefile.tell() |
|
36 | 36 | yield chunk, pos - len(chunk) |
|
37 | 37 | n = self.count() |
|
38 | 38 | prev = None |
|
39 | 39 | for chunk, start in chunkpositer(): |
|
40 | 40 | size = len(chunk) |
|
41 | 41 | if size < 80: |
|
42 | 42 | raise util.Abort("invalid changegroup") |
|
43 | 43 | start += 80 |
|
44 | 44 | size -= 80 |
|
45 | 45 | node, p1, p2, cs = struct.unpack("20s20s20s20s", chunk[:80]) |
|
46 | 46 | if node in self.nodemap: |
|
47 | 47 | prev = node |
|
48 | 48 | continue |
|
49 | 49 | for p in (p1, p2): |
|
50 | 50 | if not p in self.nodemap: |
|
51 | raise revlog.LookupError( |
|
51 | raise revlog.LookupError(p1, self.indexfile, | |
|
52 | _("unknown parent")) | |
|
52 | 53 | if linkmapper is None: |
|
53 | 54 | link = n |
|
54 | 55 | else: |
|
55 | 56 | link = linkmapper(cs) |
|
56 | 57 | |
|
57 | 58 | if not prev: |
|
58 | 59 | prev = p1 |
|
59 | 60 | # start, size, full unc. size, base (unused), link, p1, p2, node |
|
60 | 61 | e = (revlog.offset_type(start, 0), size, -1, -1, link, |
|
61 | 62 | self.rev(p1), self.rev(p2), node) |
|
62 | 63 | self.basemap[n] = prev |
|
63 | 64 | self.index.insert(-1, e) |
|
64 | 65 | self.nodemap[node] = n |
|
65 | 66 | prev = node |
|
66 | 67 | n += 1 |
|
67 | 68 | |
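Each changegroup chunk consumed above starts with an 80-byte header of four binary sha1 nodes (node, p1, p2, cs) followed by the delta payload; a sketch of that unpacking, with made-up values:

    import struct

    chunk = '\x11' * 20 + '\x22' * 20 + '\x33' * 20 + '\x44' * 20 + 'delta...'
    node, p1, p2, cs = struct.unpack("20s20s20s20s", chunk[:80])
    assert node == '\x11' * 20 and cs == '\x44' * 20
    assert chunk[80:] == 'delta...'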
|
68 | 69 | def bundle(self, rev): |
|
69 | 70 | """is rev from the bundle""" |
|
70 | 71 | if rev < 0: |
|
71 | 72 | return False |
|
72 | 73 | return rev in self.basemap |
|
73 | 74 | def bundlebase(self, rev): return self.basemap[rev] |
|
74 | 75 | def chunk(self, rev, df=None, cachelen=4096): |
|
75 | 76 | # Warning: in case of bundle, the diff is against bundlebase, |
|
76 | 77 | # not against rev - 1 |
|
77 | 78 | # XXX: could use some caching |
|
78 | 79 | if not self.bundle(rev): |
|
79 | 80 | return revlog.revlog.chunk(self, rev, df) |
|
80 | 81 | self.bundlefile.seek(self.start(rev)) |
|
81 | 82 | return self.bundlefile.read(self.length(rev)) |
|
82 | 83 | |
|
83 | 84 | def revdiff(self, rev1, rev2): |
|
84 | 85 | """return or calculate a delta between two revisions""" |
|
85 | 86 | if self.bundle(rev1) and self.bundle(rev2): |
|
86 | 87 | # hot path for bundle |
|
87 | 88 | revb = self.rev(self.bundlebase(rev2)) |
|
88 | 89 | if revb == rev1: |
|
89 | 90 | return self.chunk(rev2) |
|
90 | 91 | elif not self.bundle(rev1) and not self.bundle(rev2): |
|
91 | 92 | return revlog.revlog.revdiff(self, rev1, rev2) |
|
92 | 93 | |
|
93 | 94 | return mdiff.textdiff(self.revision(self.node(rev1)), |
|
94 | 95 | self.revision(self.node(rev2))) |
|
95 | 96 | |
|
96 | 97 | def revision(self, node): |
|
97 | 98 | """return an uncompressed revision of a given""" |
|
98 | 99 | if node == nullid: return "" |
|
99 | 100 | |
|
100 | 101 | text = None |
|
101 | 102 | chain = [] |
|
102 | 103 | iter_node = node |
|
103 | 104 | rev = self.rev(iter_node) |
|
104 | 105 | # reconstruct the revision if it is from a changegroup |
|
105 | 106 | while self.bundle(rev): |
|
106 | 107 | if self._cache and self._cache[0] == iter_node: |
|
107 | 108 | text = self._cache[2] |
|
108 | 109 | break |
|
109 | 110 | chain.append(rev) |
|
110 | 111 | iter_node = self.bundlebase(rev) |
|
111 | 112 | rev = self.rev(iter_node) |
|
112 | 113 | if text is None: |
|
113 | 114 | text = revlog.revlog.revision(self, iter_node) |
|
114 | 115 | |
|
115 | 116 | while chain: |
|
116 | 117 | delta = self.chunk(chain.pop()) |
|
117 | 118 | text = mdiff.patches(text, [delta]) |
|
118 | 119 | |
|
119 | 120 | p1, p2 = self.parents(node) |
|
120 | 121 | if node != revlog.hash(text, p1, p2): |
|
121 | 122 | raise revlog.RevlogError(_("integrity check failed on %s:%d") |
|
122 | 123 | % (self.datafile, self.rev(node))) |
|
123 | 124 | |
|
124 | 125 | self._cache = (node, self.rev(node), text) |
|
125 | 126 | return text |
|
126 | 127 | |
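The while loop above replays a chain of deltas on top of a base text, oldest delta first (chain.pop() walks back toward the base); a standalone sketch, assuming mercurial's mdiff module is importable:

    from mercurial import mdiff

    base = 'one\ntwo\n'
    v2 = 'one\n2\n'
    v3 = 'one\n2\nthree\n'
    chain = [mdiff.textdiff(base, v2), mdiff.textdiff(v2, v3)]
    text = base
    for delta in chain:
        text = mdiff.patches(text, [delta])
    assert text == v3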
|
127 | 128 | def addrevision(self, text, transaction, link, p1=None, p2=None, d=None): |
|
128 | 129 | raise NotImplementedError |
|
129 | 130 | def addgroup(self, revs, linkmapper, transaction, unique=0): |
|
130 | 131 | raise NotImplementedError |
|
131 | 132 | def strip(self, rev, minlink): |
|
132 | 133 | raise NotImplementedError |
|
133 | 134 | def checksize(self): |
|
134 | 135 | raise NotImplementedError |
|
135 | 136 | |
|
136 | 137 | class bundlechangelog(bundlerevlog, changelog.changelog): |
|
137 | 138 | def __init__(self, opener, bundlefile): |
|
138 | 139 | changelog.changelog.__init__(self, opener) |
|
139 | 140 | bundlerevlog.__init__(self, opener, self.indexfile, bundlefile) |
|
140 | 141 | |
|
141 | 142 | class bundlemanifest(bundlerevlog, manifest.manifest): |
|
142 | 143 | def __init__(self, opener, bundlefile, linkmapper): |
|
143 | 144 | manifest.manifest.__init__(self, opener) |
|
144 | 145 | bundlerevlog.__init__(self, opener, self.indexfile, bundlefile, |
|
145 | 146 | linkmapper) |
|
146 | 147 | |
|
147 | 148 | class bundlefilelog(bundlerevlog, filelog.filelog): |
|
148 | 149 | def __init__(self, opener, path, bundlefile, linkmapper): |
|
149 | 150 | filelog.filelog.__init__(self, opener, path) |
|
150 | 151 | bundlerevlog.__init__(self, opener, self.indexfile, bundlefile, |
|
151 | 152 | linkmapper) |
|
152 | 153 | |
|
153 | 154 | class bundlerepository(localrepo.localrepository): |
|
154 | 155 | def __init__(self, ui, path, bundlename): |
|
155 | 156 | localrepo.localrepository.__init__(self, ui, path) |
|
156 | 157 | |
|
157 | 158 | if path: |
|
158 | 159 | self._url = 'bundle:' + path + '+' + bundlename |
|
159 | 160 | else: |
|
160 | 161 | self._url = 'bundle:' + bundlename |
|
161 | 162 | |
|
162 | 163 | self.tempfile = None |
|
163 | 164 | self.bundlefile = open(bundlename, "rb") |
|
164 | 165 | header = self.bundlefile.read(6) |
|
165 | 166 | if not header.startswith("HG"): |
|
166 | 167 | raise util.Abort(_("%s: not a Mercurial bundle file") % bundlename) |
|
167 | 168 | elif not header.startswith("HG10"): |
|
168 | 169 | raise util.Abort(_("%s: unknown bundle version") % bundlename) |
|
169 | 170 | elif header == "HG10BZ": |
|
170 | 171 | fdtemp, temp = tempfile.mkstemp(prefix="hg-bundle-", |
|
171 | 172 | suffix=".hg10un", dir=self.path) |
|
172 | 173 | self.tempfile = temp |
|
173 | 174 | fptemp = os.fdopen(fdtemp, 'wb') |
|
174 | 175 | def generator(f): |
|
175 | 176 | zd = bz2.BZ2Decompressor() |
|
176 | 177 | zd.decompress("BZ") |
|
177 | 178 | for chunk in f: |
|
178 | 179 | yield zd.decompress(chunk) |
|
179 | 180 | gen = generator(util.filechunkiter(self.bundlefile, 4096)) |
|
180 | 181 | |
|
181 | 182 | try: |
|
182 | 183 | fptemp.write("HG10UN") |
|
183 | 184 | for chunk in gen: |
|
184 | 185 | fptemp.write(chunk) |
|
185 | 186 | finally: |
|
186 | 187 | fptemp.close() |
|
187 | 188 | self.bundlefile.close() |
|
188 | 189 | |
|
189 | 190 | self.bundlefile = open(self.tempfile, "rb") |
|
190 | 191 | # seek right after the header |
|
191 | 192 | self.bundlefile.seek(6) |
|
192 | 193 | elif header == "HG10UN": |
|
193 | 194 | # nothing to do |
|
194 | 195 | pass |
|
195 | 196 | else: |
|
196 | 197 | raise util.Abort(_("%s: unknown bundle compression type") |
|
197 | 198 | % bundlename) |
|
198 | 199 | # dict with the mapping 'filename' -> position in the bundle |
|
199 | 200 | self.bundlefilespos = {} |
|
200 | 201 | |
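A note on the HG10BZ branch above: on disk, the bytes after the six-byte "HG10BZ" header form a bzip2 stream missing its leading "BZ" magic, so the decompressor is primed with a literal "BZ" before the file contents are fed in. A standalone sketch:

    import bz2

    compressed = bz2.compress('some changegroup data')
    payload = compressed[2:]             # what follows the HG10BZ header
    zd = bz2.BZ2Decompressor()
    zd.decompress('BZ')                  # re-inject the stripped magic
    assert zd.decompress(payload) == 'some changegroup data'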
|
201 | 202 | def __getattr__(self, name): |
|
202 | 203 | if name == 'changelog': |
|
203 | 204 | self.changelog = bundlechangelog(self.sopener, self.bundlefile) |
|
204 | 205 | self.manstart = self.bundlefile.tell() |
|
205 | 206 | return self.changelog |
|
206 | 207 | if name == 'manifest': |
|
207 | 208 | self.bundlefile.seek(self.manstart) |
|
208 | 209 | self.manifest = bundlemanifest(self.sopener, self.bundlefile, |
|
209 | 210 | self.changelog.rev) |
|
210 | 211 | self.filestart = self.bundlefile.tell() |
|
211 | 212 | return self.manifest |
|
212 | 213 | if name == 'manstart': |
|
213 | 214 | self.changelog |
|
214 | 215 | return self.manstart |
|
215 | 216 | if name == 'filestart': |
|
216 | 217 | self.manifest |
|
217 | 218 | return self.filestart |
|
218 | 219 | return localrepo.localrepository.__getattr__(self, name) |
|
219 | 220 | |
|
220 | 221 | def url(self): |
|
221 | 222 | return self._url |
|
222 | 223 | |
|
223 | 224 | def dev(self): |
|
224 | 225 | return -1 |
|
225 | 226 | |
|
226 | 227 | def file(self, f): |
|
227 | 228 | if not self.bundlefilespos: |
|
228 | 229 | self.bundlefile.seek(self.filestart) |
|
229 | 230 | while 1: |
|
230 | 231 | chunk = changegroup.getchunk(self.bundlefile) |
|
231 | 232 | if not chunk: |
|
232 | 233 | break |
|
233 | 234 | self.bundlefilespos[chunk] = self.bundlefile.tell() |
|
234 | 235 | for c in changegroup.chunkiter(self.bundlefile): |
|
235 | 236 | pass |
|
236 | 237 | |
|
237 | 238 | if f[0] == '/': |
|
238 | 239 | f = f[1:] |
|
239 | 240 | if f in self.bundlefilespos: |
|
240 | 241 | self.bundlefile.seek(self.bundlefilespos[f]) |
|
241 | 242 | return bundlefilelog(self.sopener, f, self.bundlefile, |
|
242 | 243 | self.changelog.rev) |
|
243 | 244 | else: |
|
244 | 245 | return filelog.filelog(self.sopener, f) |
|
245 | 246 | |
|
246 | 247 | def close(self): |
|
247 | 248 | """Close assigned bundle file immediately.""" |
|
248 | 249 | self.bundlefile.close() |
|
249 | 250 | |
|
250 | 251 | def __del__(self): |
|
251 | 252 | bundlefile = getattr(self, 'bundlefile', None) |
|
252 | 253 | if bundlefile and not bundlefile.closed: |
|
253 | 254 | bundlefile.close() |
|
254 | 255 | tempfile = getattr(self, 'tempfile', None) |
|
255 | 256 | if tempfile is not None: |
|
256 | 257 | os.unlink(tempfile) |
|
257 | 258 | |
|
258 | 259 | def instance(ui, path, create): |
|
259 | 260 | if create: |
|
260 | 261 | raise util.Abort(_('cannot create new bundle repository')) |
|
261 | 262 | parentpath = ui.config("bundle", "mainreporoot", "") |
|
262 | 263 | if parentpath: |
|
263 | 264 | # Try to make the full path relative so we get a nice, short URL. |
|
264 | 265 | # In particular, we don't want temp dir names in test outputs. |
|
265 | 266 | cwd = os.getcwd() |
|
266 | 267 | if parentpath == cwd: |
|
267 | 268 | parentpath = '' |
|
268 | 269 | else: |
|
269 | 270 | cwd = os.path.join(cwd,'') |
|
270 | 271 | if parentpath.startswith(cwd): |
|
271 | 272 | parentpath = parentpath[len(cwd):] |
|
272 | 273 | path = util.drop_scheme('file', path) |
|
273 | 274 | if path.startswith('bundle:'): |
|
274 | 275 | path = util.drop_scheme('bundle', path) |
|
275 | 276 | s = path.split("+", 1) |
|
276 | 277 | if len(s) == 1: |
|
277 | 278 | repopath, bundlename = parentpath, s[0] |
|
278 | 279 | else: |
|
279 | 280 | repopath, bundlename = s |
|
280 | 281 | else: |
|
281 | 282 | repopath, bundlename = parentpath, path |
|
282 | 283 | return bundlerepository(ui, repopath, bundlename) |
@@ -1,620 +1,622 @@ mercurial/context.py
|
1 | 1 | # context.py - changeset and file context objects for mercurial |
|
2 | 2 | # |
|
3 | 3 | # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com> |
|
4 | 4 | # |
|
5 | 5 | # This software may be used and distributed according to the terms |
|
6 | 6 | # of the GNU General Public License, incorporated herein by reference. |
|
7 | 7 | |
|
8 | 8 | from node import nullid, nullrev, short |
|
9 | 9 | from i18n import _ |
|
10 | 10 | import ancestor, bdiff, revlog, util, os, errno |
|
11 | 11 | |
|
12 | 12 | class changectx(object): |
|
13 | 13 | """A changecontext object makes access to data related to a particular |
|
14 | 14 | changeset convenient.""" |
|
15 | 15 | def __init__(self, repo, changeid=None): |
|
16 | 16 | """changeid is a revision number, node, or tag""" |
|
17 | 17 | self._repo = repo |
|
18 | 18 | |
|
19 | 19 | if not changeid and changeid != 0: |
|
20 | 20 | p1, p2 = self._repo.dirstate.parents() |
|
21 | 21 | self._rev = self._repo.changelog.rev(p1) |
|
22 | 22 | if self._rev == -1: |
|
23 | 23 | changeid = 'tip' |
|
24 | 24 | else: |
|
25 | 25 | self._node = p1 |
|
26 | 26 | return |
|
27 | 27 | |
|
28 | 28 | self._node = self._repo.lookup(changeid) |
|
29 | 29 | self._rev = self._repo.changelog.rev(self._node) |
|
30 | 30 | |
|
31 | 31 | def __str__(self): |
|
32 | 32 | return short(self.node()) |
|
33 | 33 | |
|
34 | 34 | def __repr__(self): |
|
35 | 35 | return "<changectx %s>" % str(self) |
|
36 | 36 | |
|
37 | 37 | def __eq__(self, other): |
|
38 | 38 | try: |
|
39 | 39 | return self._rev == other._rev |
|
40 | 40 | except AttributeError: |
|
41 | 41 | return False |
|
42 | 42 | |
|
43 | 43 | def __ne__(self, other): |
|
44 | 44 | return not (self == other) |
|
45 | 45 | |
|
46 | 46 | def __nonzero__(self): |
|
47 | 47 | return self._rev != nullrev |
|
48 | 48 | |
|
49 | 49 | def __getattr__(self, name): |
|
50 | 50 | if name == '_changeset': |
|
51 | 51 | self._changeset = self._repo.changelog.read(self.node()) |
|
52 | 52 | return self._changeset |
|
53 | 53 | elif name == '_manifest': |
|
54 | 54 | self._manifest = self._repo.manifest.read(self._changeset[0]) |
|
55 | 55 | return self._manifest |
|
56 | 56 | elif name == '_manifestdelta': |
|
57 | 57 | md = self._repo.manifest.readdelta(self._changeset[0]) |
|
58 | 58 | self._manifestdelta = md |
|
59 | 59 | return self._manifestdelta |
|
60 | 60 | else: |
|
61 | 61 | raise AttributeError, name |
|
62 | 62 | |
|
63 | 63 | def __contains__(self, key): |
|
64 | 64 | return key in self._manifest |
|
65 | 65 | |
|
66 | 66 | def __getitem__(self, key): |
|
67 | 67 | return self.filectx(key) |
|
68 | 68 | |
|
69 | 69 | def __iter__(self): |
|
70 | 70 | a = self._manifest.keys() |
|
71 | 71 | a.sort() |
|
72 | 72 | for f in a: |
|
73 | 73 | yield f |
|
74 | 74 | |
|
75 | 75 | def changeset(self): return self._changeset |
|
76 | 76 | def manifest(self): return self._manifest |
|
77 | 77 | |
|
78 | 78 | def rev(self): return self._rev |
|
79 | 79 | def node(self): return self._node |
|
80 | 80 | def user(self): return self._changeset[1] |
|
81 | 81 | def date(self): return self._changeset[2] |
|
82 | 82 | def files(self): return self._changeset[3] |
|
83 | 83 | def description(self): return self._changeset[4] |
|
84 | 84 | def branch(self): return self._changeset[5].get("branch") |
|
85 | 85 | def extra(self): return self._changeset[5] |
|
86 | 86 | def tags(self): return self._repo.nodetags(self._node) |
|
87 | 87 | |
|
88 | 88 | def parents(self): |
|
89 | 89 | """return contexts for each parent changeset""" |
|
90 | 90 | p = self._repo.changelog.parents(self._node) |
|
91 | 91 | return [changectx(self._repo, x) for x in p] |
|
92 | 92 | |
|
93 | 93 | def children(self): |
|
94 | 94 | """return contexts for each child changeset""" |
|
95 | 95 | c = self._repo.changelog.children(self._node) |
|
96 | 96 | return [changectx(self._repo, x) for x in c] |
|
97 | 97 | |
|
98 | 98 | def _fileinfo(self, path): |
|
99 | 99 | if '_manifest' in self.__dict__: |
|
100 | 100 | try: |
|
101 | 101 | return self._manifest[path], self._manifest.flags(path) |
|
102 | 102 | except KeyError: |
|
103 | raise revlog.LookupError(path, |
|
103 | raise revlog.LookupError(self._node, path, | |
|
104 | _('not found in manifest')) | |
|
104 | 105 | if '_manifestdelta' in self.__dict__ or path in self.files(): |
|
105 | 106 | if path in self._manifestdelta: |
|
106 | 107 | return self._manifestdelta[path], self._manifestdelta.flags(path) |
|
107 | 108 | node, flag = self._repo.manifest.find(self._changeset[0], path) |
|
108 | 109 | if not node: |
|
109 | raise revlog.LookupError(path, |
|
110 | raise revlog.LookupError(self._node, path, | |
|
111 | _('not found in manifest')) | |
|
110 | 112 | |
|
111 | 113 | return node, flag |
|
112 | 114 | |
|
113 | 115 | def filenode(self, path): |
|
114 | 116 | return self._fileinfo(path)[0] |
|
115 | 117 | |
|
116 | 118 | def fileflags(self, path): |
|
117 | 119 | try: |
|
118 | 120 | return self._fileinfo(path)[1] |
|
119 | 121 | except revlog.LookupError: |
|
120 | 122 | return '' |
|
121 | 123 | |
|
122 | 124 | def filectx(self, path, fileid=None, filelog=None): |
|
123 | 125 | """get a file context from this changeset""" |
|
124 | 126 | if fileid is None: |
|
125 | 127 | fileid = self.filenode(path) |
|
126 | 128 | return filectx(self._repo, path, fileid=fileid, |
|
127 | 129 | changectx=self, filelog=filelog) |
|
128 | 130 | |
|
129 | 131 | def filectxs(self): |
|
130 | 132 | """generate a file context for each file in this changeset's |
|
131 | 133 | manifest""" |
|
132 | 134 | mf = self.manifest() |
|
133 | 135 | m = mf.keys() |
|
134 | 136 | m.sort() |
|
135 | 137 | for f in m: |
|
136 | 138 | yield self.filectx(f, fileid=mf[f]) |
|
137 | 139 | |
|
138 | 140 | def ancestor(self, c2): |
|
139 | 141 | """ |
|
140 | 142 | return the ancestor context of self and c2 |
|
141 | 143 | """ |
|
142 | 144 | n = self._repo.changelog.ancestor(self._node, c2._node) |
|
143 | 145 | return changectx(self._repo, n) |
|
144 | 146 | |
|
145 | 147 | class filectx(object): |
|
146 | 148 | """A filecontext object makes access to data related to a particular |
|
147 | 149 | filerevision convenient.""" |
|
148 | 150 | def __init__(self, repo, path, changeid=None, fileid=None, |
|
149 | 151 | filelog=None, changectx=None): |
|
150 | 152 | """changeid can be a changeset revision, node, or tag. |
|
151 | 153 | fileid can be a file revision or node.""" |
|
152 | 154 | self._repo = repo |
|
153 | 155 | self._path = path |
|
154 | 156 | |
|
155 | 157 | assert (changeid is not None |
|
156 | 158 | or fileid is not None |
|
157 | 159 | or changectx is not None) |
|
158 | 160 | |
|
159 | 161 | if filelog: |
|
160 | 162 | self._filelog = filelog |
|
161 | 163 | |
|
162 | 164 | if changeid is not None: |
|
163 | 165 | self._changeid = changeid |
|
164 | 166 | if changectx is not None: |
|
165 | 167 | self._changectx = changectx |
|
166 | 168 | if fileid is not None: |
|
167 | 169 | self._fileid = fileid |
|
168 | 170 | |
|
169 | 171 | def __getattr__(self, name): |
|
170 | 172 | if name == '_changectx': |
|
171 | 173 | self._changectx = changectx(self._repo, self._changeid) |
|
172 | 174 | return self._changectx |
|
173 | 175 | elif name == '_filelog': |
|
174 | 176 | self._filelog = self._repo.file(self._path) |
|
175 | 177 | return self._filelog |
|
176 | 178 | elif name == '_changeid': |
|
177 | 179 | if '_changectx' in self.__dict__: |
|
178 | 180 | self._changeid = self._changectx.rev() |
|
179 | 181 | else: |
|
180 | 182 | self._changeid = self._filelog.linkrev(self._filenode) |
|
181 | 183 | return self._changeid |
|
182 | 184 | elif name == '_filenode': |
|
183 | 185 | if '_fileid' in self.__dict__: |
|
184 | 186 | self._filenode = self._filelog.lookup(self._fileid) |
|
185 | 187 | else: |
|
186 | 188 | self._filenode = self._changectx.filenode(self._path) |
|
187 | 189 | return self._filenode |
|
188 | 190 | elif name == '_filerev': |
|
189 | 191 | self._filerev = self._filelog.rev(self._filenode) |
|
190 | 192 | return self._filerev |
|
191 | 193 | else: |
|
192 | 194 | raise AttributeError, name |
|
193 | 195 | |
|
194 | 196 | def __nonzero__(self): |
|
195 | 197 | try: |
|
196 | 198 | n = self._filenode |
|
197 | 199 | return True |
|
198 | 200 | except revlog.LookupError: |
|
199 | 201 | # file is missing |
|
200 | 202 | return False |
|
201 | 203 | |
|
202 | 204 | def __str__(self): |
|
203 | 205 | return "%s@%s" % (self.path(), short(self.node())) |
|
204 | 206 | |
|
205 | 207 | def __repr__(self): |
|
206 | 208 | return "<filectx %s>" % str(self) |
|
207 | 209 | |
|
208 | 210 | def __eq__(self, other): |
|
209 | 211 | try: |
|
210 | 212 | return (self._path == other._path |
|
211 | 213 | and self._fileid == other._fileid) |
|
212 | 214 | except AttributeError: |
|
213 | 215 | return False |
|
214 | 216 | |
|
215 | 217 | def __ne__(self, other): |
|
216 | 218 | return not (self == other) |
|
217 | 219 | |
|
218 | 220 | def filectx(self, fileid): |
|
219 | 221 | '''opens an arbitrary revision of the file without |
|
220 | 222 | opening a new filelog''' |
|
221 | 223 | return filectx(self._repo, self._path, fileid=fileid, |
|
222 | 224 | filelog=self._filelog) |
|
223 | 225 | |
|
224 | 226 | def filerev(self): return self._filerev |
|
225 | 227 | def filenode(self): return self._filenode |
|
226 | 228 | def fileflags(self): return self._changectx.fileflags(self._path) |
|
227 | 229 | def isexec(self): return 'x' in self.fileflags() |
|
228 | 230 | def islink(self): return 'l' in self.fileflags() |
|
229 | 231 | def filelog(self): return self._filelog |
|
230 | 232 | |
|
231 | 233 | def rev(self): |
|
232 | 234 | if '_changectx' in self.__dict__: |
|
233 | 235 | return self._changectx.rev() |
|
234 | 236 | if '_changeid' in self.__dict__: |
|
235 | 237 | return self._changectx.rev() |
|
236 | 238 | return self._filelog.linkrev(self._filenode) |
|
237 | 239 | |
|
238 | 240 | def linkrev(self): return self._filelog.linkrev(self._filenode) |
|
239 | 241 | def node(self): return self._changectx.node() |
|
240 | 242 | def user(self): return self._changectx.user() |
|
241 | 243 | def date(self): return self._changectx.date() |
|
242 | 244 | def files(self): return self._changectx.files() |
|
243 | 245 | def description(self): return self._changectx.description() |
|
244 | 246 | def branch(self): return self._changectx.branch() |
|
245 | 247 | def manifest(self): return self._changectx.manifest() |
|
246 | 248 | def changectx(self): return self._changectx |
|
247 | 249 | |
|
248 | 250 | def data(self): return self._filelog.read(self._filenode) |
|
249 | 251 | def path(self): return self._path |
|
250 | 252 | def size(self): return self._filelog.size(self._filerev) |
|
251 | 253 | |
|
252 | 254 | def cmp(self, text): return self._filelog.cmp(self._filenode, text) |
|
253 | 255 | |
|
254 | 256 | def renamed(self): |
|
255 | 257 | """check if file was actually renamed in this changeset revision |
|
256 | 258 | |
|
257 | 259 | If rename logged in file revision, we report copy for changeset only |
|
258 | 260 | if file revisions linkrev points back to the changeset in question |
|
259 | 261 | or both changeset parents contain different file revisions. |
|
260 | 262 | """ |
|
261 | 263 | |
|
262 | 264 | renamed = self._filelog.renamed(self._filenode) |
|
263 | 265 | if not renamed: |
|
264 | 266 | return renamed |
|
265 | 267 | |
|
266 | 268 | if self.rev() == self.linkrev(): |
|
267 | 269 | return renamed |
|
268 | 270 | |
|
269 | 271 | name = self.path() |
|
270 | 272 | fnode = self._filenode |
|
271 | 273 | for p in self._changectx.parents(): |
|
272 | 274 | try: |
|
273 | 275 | if fnode == p.filenode(name): |
|
274 | 276 | return None |
|
275 | 277 | except revlog.LookupError: |
|
276 | 278 | pass |
|
277 | 279 | return renamed |
|
278 | 280 | |
|
279 | 281 | def parents(self): |
|
280 | 282 | p = self._path |
|
281 | 283 | fl = self._filelog |
|
282 | 284 | pl = [(p, n, fl) for n in self._filelog.parents(self._filenode)] |
|
283 | 285 | |
|
284 | 286 | r = self._filelog.renamed(self._filenode) |
|
285 | 287 | if r: |
|
286 | 288 | pl[0] = (r[0], r[1], None) |
|
287 | 289 | |
|
288 | 290 | return [filectx(self._repo, p, fileid=n, filelog=l) |
|
289 | 291 | for p,n,l in pl if n != nullid] |
|
290 | 292 | |
|
291 | 293 | def children(self): |
|
292 | 294 | # hard for renames |
|
293 | 295 | c = self._filelog.children(self._filenode) |
|
294 | 296 | return [filectx(self._repo, self._path, fileid=x, |
|
295 | 297 | filelog=self._filelog) for x in c] |
|
296 | 298 | |
|
297 | 299 | def annotate(self, follow=False, linenumber=None): |
|
298 | 300 | '''returns a list of tuples of (ctx, line) for each line |
|
299 | 301 | in the file, where ctx is the filectx of the node where |
|
300 | 302 | that line was last changed. |
|
301 | 303 | This returns tuples of ((ctx, linenumber), line) for each line, |
|
302 | 304 | if "linenumber" parameter is NOT "None". |
|
303 | 305 | In such tuples, linenumber means one at the first appearance |
|
304 | 306 | in the managed file. |
|
305 | 307 | To reduce annotation cost, |
|
306 | 308 | this returns fixed value(False is used) as linenumber, |
|
307 | 309 | if "linenumber" parameter is "False".''' |
|
308 | 310 | |
|
309 | 311 | def decorate_compat(text, rev): |
|
310 | 312 | return ([rev] * len(text.splitlines()), text) |
|
311 | 313 | |
|
312 | 314 | def without_linenumber(text, rev): |
|
313 | 315 | return ([(rev, False)] * len(text.splitlines()), text) |
|
314 | 316 | |
|
315 | 317 | def with_linenumber(text, rev): |
|
316 | 318 | size = len(text.splitlines()) |
|
317 | 319 | return ([(rev, i) for i in xrange(1, size + 1)], text) |
|
318 | 320 | |
|
319 | 321 | decorate = (((linenumber is None) and decorate_compat) or |
|
320 | 322 | (linenumber and with_linenumber) or |
|
321 | 323 | without_linenumber) |
|
322 | 324 | |
|
323 | 325 | def pair(parent, child): |
|
324 | 326 | for a1, a2, b1, b2 in bdiff.blocks(parent[1], child[1]): |
|
325 | 327 | child[0][b1:b2] = parent[0][a1:a2] |
|
326 | 328 | return child |
|
327 | 329 | |
|
328 | 330 | getlog = util.cachefunc(lambda x: self._repo.file(x)) |
|
329 | 331 | def getctx(path, fileid): |
|
330 | 332 | log = path == self._path and self._filelog or getlog(path) |
|
331 | 333 | return filectx(self._repo, path, fileid=fileid, filelog=log) |
|
332 | 334 | getctx = util.cachefunc(getctx) |
|
333 | 335 | |
|
334 | 336 | def parents(f): |
|
335 | 337 | # we want to reuse filectx objects as much as possible |
|
336 | 338 | p = f._path |
|
337 | 339 | if f._filerev is None: # working dir |
|
338 | 340 | pl = [(n.path(), n.filerev()) for n in f.parents()] |
|
339 | 341 | else: |
|
340 | 342 | pl = [(p, n) for n in f._filelog.parentrevs(f._filerev)] |
|
341 | 343 | |
|
342 | 344 | if follow: |
|
343 | 345 | r = f.renamed() |
|
344 | 346 | if r: |
|
345 | 347 | pl[0] = (r[0], getlog(r[0]).rev(r[1])) |
|
346 | 348 | |
|
347 | 349 | return [getctx(p, n) for p, n in pl if n != nullrev] |
|
348 | 350 | |
|
349 | 351 | # use linkrev to find the first changeset where self appeared |
|
350 | 352 | if self.rev() != self.linkrev(): |
|
351 | 353 | base = self.filectx(self.filerev()) |
|
352 | 354 | else: |
|
353 | 355 | base = self |
|
354 | 356 | |
|
355 | 357 | # find all ancestors |
|
356 | 358 | needed = {base: 1} |
|
357 | 359 | visit = [base] |
|
358 | 360 | files = [base._path] |
|
359 | 361 | while visit: |
|
360 | 362 | f = visit.pop(0) |
|
361 | 363 | for p in parents(f): |
|
362 | 364 | if p not in needed: |
|
363 | 365 | needed[p] = 1 |
|
364 | 366 | visit.append(p) |
|
365 | 367 | if p._path not in files: |
|
366 | 368 | files.append(p._path) |
|
367 | 369 | else: |
|
368 | 370 | # count how many times we'll use this |
|
369 | 371 | needed[p] += 1 |
|
370 | 372 | |
|
371 | 373 | # sort by revision (per file) which is a topological order |
|
372 | 374 | visit = [] |
|
373 | 375 | for f in files: |
|
374 | 376 | fn = [(n.rev(), n) for n in needed.keys() if n._path == f] |
|
375 | 377 | visit.extend(fn) |
|
376 | 378 | visit.sort() |
|
377 | 379 | hist = {} |
|
378 | 380 | |
|
379 | 381 | for r, f in visit: |
|
380 | 382 | curr = decorate(f.data(), f) |
|
381 | 383 | for p in parents(f): |
|
382 | 384 | if p != nullid: |
|
383 | 385 | curr = pair(hist[p], curr) |
|
384 | 386 | # trim the history of unneeded revs |
|
385 | 387 | needed[p] -= 1 |
|
386 | 388 | if not needed[p]: |
|
387 | 389 | del hist[p] |
|
388 | 390 | hist[f] = curr |
|
389 | 391 | |
|
390 | 392 | return zip(hist[f][0], hist[f][1].splitlines(1)) |
|
391 | 393 | |
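A reading aid for the decorate/pair scheme above, with integers standing in for filectx objects (assumes mercurial's bdiff extension is importable):

    from mercurial import bdiff

    def decorate(text, rev):
        return ([rev] * len(text.splitlines()), text)

    def pair(parent, child):
        # lines the child shares with its parent keep the parent's attribution
        for a1, a2, b1, b2 in bdiff.blocks(parent[1], child[1]):
            child[0][b1:b2] = parent[0][a1:a2]
        return child

    annotated = pair(decorate('a\nb\n', 1), decorate('a\nc\n', 2))
    assert annotated[0] == [1, 2]   # 'a' is from rev 1, 'c' is new in rev 2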
|
392 | 394 | def ancestor(self, fc2): |
|
393 | 395 | """ |
|
394 | 396 | find the common ancestor file context, if any, of self, and fc2 |
|
395 | 397 | """ |
|
396 | 398 | |
|
397 | 399 | acache = {} |
|
398 | 400 | |
|
399 | 401 | # prime the ancestor cache for the working directory |
|
400 | 402 | for c in (self, fc2): |
|
401 | 403 | if c._filerev == None: |
|
402 | 404 | pl = [(n.path(), n.filenode()) for n in c.parents()] |
|
403 | 405 | acache[(c._path, None)] = pl |
|
404 | 406 | |
|
405 | 407 | flcache = {self._path:self._filelog, fc2._path:fc2._filelog} |
|
406 | 408 | def parents(vertex): |
|
407 | 409 | if vertex in acache: |
|
408 | 410 | return acache[vertex] |
|
409 | 411 | f, n = vertex |
|
410 | 412 | if f not in flcache: |
|
411 | 413 | flcache[f] = self._repo.file(f) |
|
412 | 414 | fl = flcache[f] |
|
413 | 415 | pl = [(f, p) for p in fl.parents(n) if p != nullid] |
|
414 | 416 | re = fl.renamed(n) |
|
415 | 417 | if re: |
|
416 | 418 | pl.append(re) |
|
417 | 419 | acache[vertex] = pl |
|
418 | 420 | return pl |
|
419 | 421 | |
|
420 | 422 | a, b = (self._path, self._filenode), (fc2._path, fc2._filenode) |
|
421 | 423 | v = ancestor.ancestor(a, b, parents) |
|
422 | 424 | if v: |
|
423 | 425 | f, n = v |
|
424 | 426 | return filectx(self._repo, f, fileid=n, filelog=flcache[f]) |
|
425 | 427 | |
|
426 | 428 | return None |
|
427 | 429 | |
|
428 | 430 | class workingctx(changectx): |
|
429 | 431 | """A workingctx object makes access to data related to |
|
430 | 432 | the current working directory convenient.""" |
|
431 | 433 | def __init__(self, repo): |
|
432 | 434 | self._repo = repo |
|
433 | 435 | self._rev = None |
|
434 | 436 | self._node = None |
|
435 | 437 | |
|
436 | 438 | def __str__(self): |
|
437 | 439 | return str(self._parents[0]) + "+" |
|
438 | 440 | |
|
439 | 441 | def __nonzero__(self): |
|
440 | 442 | return True |
|
441 | 443 | |
|
442 | 444 | def __getattr__(self, name): |
|
443 | 445 | if name == '_parents': |
|
444 | 446 | self._parents = self._repo.parents() |
|
445 | 447 | return self._parents |
|
446 | 448 | if name == '_status': |
|
447 | 449 | self._status = self._repo.status() |
|
448 | 450 | return self._status |
|
449 | 451 | if name == '_manifest': |
|
450 | 452 | self._buildmanifest() |
|
451 | 453 | return self._manifest |
|
452 | 454 | else: |
|
453 | 455 | raise AttributeError, name |
|
454 | 456 | |
|
455 | 457 | def _buildmanifest(self): |
|
456 | 458 | """generate a manifest corresponding to the working directory""" |
|
457 | 459 | |
|
458 | 460 | man = self._parents[0].manifest().copy() |
|
459 | 461 | copied = self._repo.dirstate.copies() |
|
460 | 462 | is_exec = util.execfunc(self._repo.root, |
|
461 | 463 | lambda p: man.execf(copied.get(p,p))) |
|
462 | 464 | is_link = util.linkfunc(self._repo.root, |
|
463 | 465 | lambda p: man.linkf(copied.get(p,p))) |
|
464 | 466 | modified, added, removed, deleted, unknown = self._status[:5] |
|
465 | 467 | for i, l in (("a", added), ("m", modified), ("u", unknown)): |
|
466 | 468 | for f in l: |
|
467 | 469 | man[f] = man.get(copied.get(f, f), nullid) + i |
|
468 | 470 | try: |
|
469 | 471 | man.set(f, is_exec(f), is_link(f)) |
|
470 | 472 | except OSError: |
|
471 | 473 | pass |
|
472 | 474 | |
|
473 | 475 | for f in deleted + removed: |
|
474 | 476 | if f in man: |
|
475 | 477 | del man[f] |
|
476 | 478 | |
|
477 | 479 | self._manifest = man |
|
478 | 480 | |
|
479 | 481 | def manifest(self): return self._manifest |
|
480 | 482 | |
|
481 | 483 | def user(self): return self._repo.ui.username() |
|
482 | 484 | def date(self): return util.makedate() |
|
483 | 485 | def description(self): return "" |
|
484 | 486 | def files(self): |
|
485 | 487 | f = self.modified() + self.added() + self.removed() |
|
486 | 488 | f.sort() |
|
487 | 489 | return f |
|
488 | 490 | |
|
489 | 491 | def modified(self): return self._status[0] |
|
490 | 492 | def added(self): return self._status[1] |
|
491 | 493 | def removed(self): return self._status[2] |
|
492 | 494 | def deleted(self): return self._status[3] |
|
493 | 495 | def unknown(self): return self._status[4] |
|
494 | 496 | def clean(self): return self._status[5] |
|
495 | 497 | def branch(self): return self._repo.dirstate.branch() |
|
496 | 498 | |
|
497 | 499 | def tags(self): |
|
498 | 500 | t = [] |
|
499 | 501 | [t.extend(p.tags()) for p in self.parents()] |
|
500 | 502 | return t |
|
501 | 503 | |
|
502 | 504 | def parents(self): |
|
503 | 505 | """return contexts for each parent changeset""" |
|
504 | 506 | return self._parents |
|
505 | 507 | |
|
506 | 508 | def children(self): |
|
507 | 509 | return [] |
|
508 | 510 | |
|
509 | 511 | def fileflags(self, path): |
|
510 | 512 | if '_manifest' in self.__dict__: |
|
511 | 513 | try: |
|
512 | 514 | return self._manifest.flags(path) |
|
513 | 515 | except KeyError: |
|
514 | 516 | return '' |
|
515 | 517 | |
|
516 | 518 | pnode = self._parents[0].changeset()[0] |
|
517 | 519 | orig = self._repo.dirstate.copies().get(path, path) |
|
518 | 520 | node, flag = self._repo.manifest.find(pnode, orig) |
|
519 | 521 | is_link = util.linkfunc(self._repo.root, lambda p: 'l' in flag) |
|
520 | 522 | is_exec = util.execfunc(self._repo.root, lambda p: 'x' in flag) |
|
521 | 523 | try: |
|
522 | 524 | return (is_link(path) and 'l' or '') + (is_exec(path) and 'e' or '') |
|
523 | 525 | except OSError: |
|
524 | 526 | pass |
|
525 | 527 | |
|
526 | 528 | if not node or path in self.deleted() or path in self.removed(): |
|
527 | 529 | return '' |
|
528 | 530 | return flag |
|
529 | 531 | |
|
530 | 532 | def filectx(self, path, filelog=None): |
|
531 | 533 | """get a file context from the working directory""" |
|
532 | 534 | return workingfilectx(self._repo, path, workingctx=self, |
|
533 | 535 | filelog=filelog) |
|
534 | 536 | |
|
535 | 537 | def ancestor(self, c2): |
|
536 | 538 | """return the ancestor context of self and c2""" |
|
537 | 539 | return self._parents[0].ancestor(c2) # punt on two parents for now |
|
538 | 540 | |
|
539 | 541 | class workingfilectx(filectx): |
|
540 | 542 | """A workingfilectx object makes access to data related to a particular |
|
541 | 543 | file in the working directory convenient.""" |
|
542 | 544 | def __init__(self, repo, path, filelog=None, workingctx=None): |
|
543 | 545 | """changeid can be a changeset revision, node, or tag. |
|
544 | 546 | fileid can be a file revision or node.""" |
|
545 | 547 | self._repo = repo |
|
546 | 548 | self._path = path |
|
547 | 549 | self._changeid = None |
|
548 | 550 | self._filerev = self._filenode = None |
|
549 | 551 | |
|
550 | 552 | if filelog: |
|
551 | 553 | self._filelog = filelog |
|
552 | 554 | if workingctx: |
|
553 | 555 | self._changectx = workingctx |
|
554 | 556 | |
|
555 | 557 | def __getattr__(self, name): |
|
556 | 558 | if name == '_changectx': |
|
557 | 559 | self._changectx = workingctx(self._repo) |
|
558 | 560 | return self._changectx |
|
559 | 561 | elif name == '_repopath': |
|
560 | 562 | self._repopath = (self._repo.dirstate.copied(self._path) |
|
561 | 563 | or self._path) |
|
562 | 564 | return self._repopath |
|
563 | 565 | elif name == '_filelog': |
|
564 | 566 | self._filelog = self._repo.file(self._repopath) |
|
565 | 567 | return self._filelog |
|
566 | 568 | else: |
|
567 | 569 | raise AttributeError, name |
|
568 | 570 | |
|
569 | 571 | def __nonzero__(self): |
|
570 | 572 | return True |
|
571 | 573 | |
|
572 | 574 | def __str__(self): |
|
573 | 575 | return "%s@%s" % (self.path(), self._changectx) |
|
574 | 576 | |
|
575 | 577 | def filectx(self, fileid): |
|
576 | 578 | '''opens an arbitrary revision of the file without |
|
577 | 579 | opening a new filelog''' |
|
578 | 580 | return filectx(self._repo, self._repopath, fileid=fileid, |
|
579 | 581 | filelog=self._filelog) |
|
580 | 582 | |
|
581 | 583 | def rev(self): |
|
582 | 584 | if '_changectx' in self.__dict__: |
|
583 | 585 | return self._changectx.rev() |
|
584 | 586 | return self._filelog.linkrev(self._filenode) |
|
585 | 587 | |
|
586 | 588 | def data(self): return self._repo.wread(self._path) |
|
587 | 589 | def renamed(self): |
|
588 | 590 | rp = self._repopath |
|
589 | 591 | if rp == self._path: |
|
590 | 592 | return None |
|
591 | 593 | return rp, self._changectx._parents[0]._manifest.get(rp, nullid) |
|
592 | 594 | |
|
593 | 595 | def parents(self): |
|
594 | 596 | '''return parent filectxs, following copies if necessary''' |
|
595 | 597 | p = self._path |
|
596 | 598 | rp = self._repopath |
|
597 | 599 | pcl = self._changectx._parents |
|
598 | 600 | fl = self._filelog |
|
599 | 601 | pl = [(rp, pcl[0]._manifest.get(rp, nullid), fl)] |
|
600 | 602 | if len(pcl) > 1: |
|
601 | 603 | if rp != p: |
|
602 | 604 | fl = None |
|
603 | 605 | pl.append((p, pcl[1]._manifest.get(p, nullid), fl)) |
|
604 | 606 | |
|
605 | 607 | return [filectx(self._repo, p, fileid=n, filelog=l) |
|
606 | 608 | for p,n,l in pl if n != nullid] |
|
607 | 609 | |
|
608 | 610 | def children(self): |
|
609 | 611 | return [] |
|
610 | 612 | |
|
611 | 613 | def size(self): return os.stat(self._repo.wjoin(self._path)).st_size |
|
612 | 614 | def date(self): |
|
613 | 615 | t, tz = self._changectx.date() |
|
614 | 616 | try: |
|
615 | 617 | return (int(os.lstat(self._repo.wjoin(self._path)).st_mtime), tz) |
|
616 | 618 | except OSError, err: |
|
617 | 619 | if err.errno != errno.ENOENT: raise |
|
618 | 620 | return (t, tz) |
|
619 | 621 | |
|
620 | 622 | def cmp(self, text): return self._repo.wread(self._path) == text |
@@ -1,1319 +1,1319 @@ mercurial/revlog.py
|
1 | 1 | """ |
|
2 | 2 | revlog.py - storage back-end for mercurial |
|
3 | 3 | |
|
4 | 4 | This provides efficient delta storage with O(1) retrieve and append |
|
5 | 5 | and O(changes) merge between branches |
|
6 | 6 | |
|
7 | 7 | Copyright 2005-2007 Matt Mackall <mpm@selenic.com> |
|
8 | 8 | |
|
9 | 9 | This software may be used and distributed according to the terms |
|
10 | 10 | of the GNU General Public License, incorporated herein by reference. |
|
11 | 11 | """ |
|
12 | 12 | |
|
13 | 13 | from node import bin, hex, nullid, nullrev, short |
|
14 | 14 | from i18n import _ |
|
15 | 15 | import changegroup, errno, ancestor, mdiff |
|
16 | 16 | import sha, struct, util, zlib |
|
17 | 17 | |
|
18 | 18 | _pack = struct.pack |
|
19 | 19 | _unpack = struct.unpack |
|
20 | 20 | _compress = zlib.compress |
|
21 | 21 | _decompress = zlib.decompress |
|
22 | 22 | _sha = sha.new |
|
23 | 23 | |
|
24 | 24 | # revlog flags |
|
25 | 25 | REVLOGV0 = 0 |
|
26 | 26 | REVLOGNG = 1 |
|
27 | 27 | REVLOGNGINLINEDATA = (1 << 16) |
|
28 | 28 | REVLOG_DEFAULT_FLAGS = REVLOGNGINLINEDATA |
|
29 | 29 | REVLOG_DEFAULT_FORMAT = REVLOGNG |
|
30 | 30 | REVLOG_DEFAULT_VERSION = REVLOG_DEFAULT_FORMAT | REVLOG_DEFAULT_FLAGS |
|
31 | 31 | |
|
32 | 32 | class RevlogError(Exception): |
|
33 | 33 | pass |
|
34 | 34 | |
|
35 | 35 | class LookupError(RevlogError): |
|
36 | def __init__(self, name, message=None): |
|
37 | if message is None: | |
|
38 | message = _('not found: %s') % name | |
|
39 | RevlogError.__init__(self, message) | |
|
36 | def __init__(self, name, index, message): | |
|
40 | 37 | self.name = name |
|
38 | if isinstance(name, str) and len(name) == 20: | |
|
39 | name = short(name) | |
|
40 | RevlogError.__init__(self, _('%s@%s: %s') % (index, name, message)) | |
|
41 | 41 | |
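The new signature reports the index in which the lookup failed alongside the name; a standalone sketch of the message it builds, with short() inlined and invented values:

    import binascii

    name, index, message = '\xff' * 20, 'data/foo.txt.i', 'no node'
    if isinstance(name, str) and len(name) == 20:
        name = binascii.hexlify(name)[:12]    # what short() does
    assert ('%s@%s: %s' % (index, name, message)
            == 'data/foo.txt.i@ffffffffffff: no node')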
|
42 | 42 | def getoffset(q): |
|
43 | 43 | return int(q >> 16) |
|
44 | 44 | |
|
45 | 45 | def gettype(q): |
|
46 | 46 | return int(q & 0xFFFF) |
|
47 | 47 | |
|
48 | 48 | def offset_type(offset, type): |
|
49 | 49 | return long(long(offset) << 16 | type) |
|
50 | 50 | |
|
51 | 51 | def hash(text, p1, p2): |
|
52 | 52 | """generate a hash from the given text and its parent hashes |
|
53 | 53 | |
|
54 | 54 | This hash combines both the current file contents and its history |
|
55 | 55 | in a manner that makes it easy to distinguish nodes with the same |
|
56 | 56 | content in the revision graph. |
|
57 | 57 | """ |
|
58 | 58 | l = [p1, p2] |
|
59 | 59 | l.sort() |
|
60 | 60 | s = _sha(l[0]) |
|
61 | 61 | s.update(l[1]) |
|
62 | 62 | s.update(text) |
|
63 | 63 | return s.digest() |
|
64 | 64 | |
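
Sorting the parents first makes the node id independent of parent order, so a merge hashed as (p1, p2) or as (p2, p1) gets the same name. A small check against the function above, with fabricated parent nodes:

    # both orderings of the same parents yield one 20-byte node id
    n1 = hash('text', '\x01' * 20, '\x02' * 20)
    n2 = hash('text', '\x02' * 20, '\x01' * 20)
    assert n1 == n2 and len(n1) == 20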
|
65 | 65 | def compress(text): |
|
66 | 66 | """ generate a possibly-compressed representation of text """ |
|
67 | 67 | if not text: |
|
68 | 68 | return ("", text) |
|
69 | 69 | l = len(text) |
|
70 | 70 | bin = None |
|
71 | 71 | if l < 44: |
|
72 | 72 | pass |
|
73 | 73 | elif l > 1000000: |
|
74 | 74 | # zlib makes an internal copy, thus doubling memory usage for |
|
75 | 75 | # large files, so lets do this in pieces |
|
76 | 76 | z = zlib.compressobj() |
|
77 | 77 | p = [] |
|
78 | 78 | pos = 0 |
|
79 | 79 | while pos < l: |
|
80 | 80 | pos2 = pos + 2**20 |
|
81 | 81 | p.append(z.compress(text[pos:pos2])) |
|
82 | 82 | pos = pos2 |
|
83 | 83 | p.append(z.flush()) |
|
84 | 84 | if sum(map(len, p)) < l: |
|
85 | 85 | bin = "".join(p) |
|
86 | 86 | else: |
|
87 | 87 | bin = _compress(text) |
|
88 | 88 | if bin is None or len(bin) > l: |
|
89 | 89 | if text[0] == '\0': |
|
90 | 90 | return ("", text) |
|
91 | 91 | return ('u', text) |
|
92 | 92 | return ("", bin) |
|
93 | 93 | |
|
94 | 94 | def decompress(bin): |
|
95 | 95 | """ decompress the given input """ |
|
96 | 96 | if not bin: |
|
97 | 97 | return bin |
|
98 | 98 | t = bin[0] |
|
99 | 99 | if t == '\0': |
|
100 | 100 | return bin |
|
101 | 101 | if t == 'x': |
|
102 | 102 | return _decompress(bin) |
|
103 | 103 | if t == 'u': |
|
104 | 104 | return bin[1:] |
|
105 | 105 | raise RevlogError(_("unknown compression type %r") % t) |
|
106 | 106 | |
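
The first byte of a stored chunk doubles as its type tag: 'u' marks text kept uncompressed, 'x' is simply the leading byte of a zlib stream, and a NUL (or an empty chunk) passes through untouched. A round trip over the three cases, assuming revlog.py's namespace:

    # short or incompressible input is tagged 'u' instead of compressed
    head, body = compress('short text')
    assert head == 'u' and decompress(head + body) == 'short text'

    # repetitive input compresses; zlib output begins with 'x', the tag
    head, body = compress('a' * 1000)
    assert head == '' and body[0] == 'x'
    assert decompress(body) == 'a' * 1000

    # text starting with NUL is stored and returned as-is
    assert decompress('\0raw bytes') == '\0raw bytes'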
|
107 | 107 | class lazyparser(object): |
|
108 | 108 | """ |
|
109 | 109 | this class avoids the need to parse the entirety of large indices |
|
110 | 110 | """ |
|
111 | 111 | |
|
112 | 112 | # lazyparser is not safe to use on windows if win32 extensions not |
|
113 | 113 | # available. it keeps a file handle open, which makes it not possible |
|
114 | 114 | # to break hardlinks on local cloned repos. |
|
115 | 115 | |
|
116 | 116 | def __init__(self, dataf, size): |
|
117 | 117 | self.dataf = dataf |
|
118 | 118 | self.s = struct.calcsize(indexformatng) |
|
119 | 119 | self.datasize = size |
|
120 | 120 | self.l = size/self.s |
|
121 | 121 | self.index = [None] * self.l |
|
122 | 122 | self.map = {nullid: nullrev} |
|
123 | 123 | self.allmap = 0 |
|
124 | 124 | self.all = 0 |
|
125 | 125 | self.mapfind_count = 0 |
|
126 | 126 | |
|
127 | 127 | def loadmap(self): |
|
128 | 128 | """ |
|
129 | 129 | during a commit, we need to make sure the rev being added is |
|
130 | 130 | not a duplicate. This requires loading the entire index, |
|
131 | 131 | which is fairly slow. loadmap can load up just the node map, |
|
132 | 132 | which takes much less time. |
|
133 | 133 | """ |
|
134 | 134 | if self.allmap: |
|
135 | 135 | return |
|
136 | 136 | end = self.datasize |
|
137 | 137 | self.allmap = 1 |
|
138 | 138 | cur = 0 |
|
139 | 139 | count = 0 |
|
140 | 140 | blocksize = self.s * 256 |
|
141 | 141 | self.dataf.seek(0) |
|
142 | 142 | while cur < end: |
|
143 | 143 | data = self.dataf.read(blocksize) |
|
144 | 144 | off = 0 |
|
145 | 145 | for x in xrange(256): |
|
146 | 146 | n = data[off + ngshaoffset:off + ngshaoffset + 20] |
|
147 | 147 | self.map[n] = count |
|
148 | 148 | count += 1 |
|
149 | 149 | if count >= self.l: |
|
150 | 150 | break |
|
151 | 151 | off += self.s |
|
152 | 152 | cur += blocksize |
|
153 | 153 | |
|
154 | 154 | def loadblock(self, blockstart, blocksize, data=None): |
|
155 | 155 | if self.all: |
|
156 | 156 | return |
|
157 | 157 | if data is None: |
|
158 | 158 | self.dataf.seek(blockstart) |
|
159 | 159 | if blockstart + blocksize > self.datasize: |
|
160 | 160 | # the revlog may have grown since we've started running, |
|
161 | 161 | # but we don't have space in self.index for more entries. |
|
162 | 162 | # limit blocksize so that we don't get too much data. |
|
163 | 163 | blocksize = max(self.datasize - blockstart, 0) |
|
164 | 164 | data = self.dataf.read(blocksize) |
|
165 | 165 | lend = len(data) / self.s |
|
166 | 166 | i = blockstart / self.s |
|
167 | 167 | off = 0 |
|
168 | 168 | # lazyindex supports __delitem__ |
|
169 | 169 | if lend > len(self.index) - i: |
|
170 | 170 | lend = len(self.index) - i |
|
171 | 171 | for x in xrange(lend): |
|
172 | 172 | if self.index[i + x] == None: |
|
173 | 173 | b = data[off : off + self.s] |
|
174 | 174 | self.index[i + x] = b |
|
175 | 175 | n = b[ngshaoffset:ngshaoffset + 20] |
|
176 | 176 | self.map[n] = i + x |
|
177 | 177 | off += self.s |
|
178 | 178 | |
|
179 | 179 | def findnode(self, node): |
|
180 | 180 | """search backwards through the index file for a specific node""" |
|
181 | 181 | if self.allmap: |
|
182 | 182 | return None |
|
183 | 183 | |
|
184 | 184 | # hg log will cause many many searches for the manifest |
|
185 | 185 | # nodes. After we get called a few times, just load the whole |
|
186 | 186 | # thing. |
|
187 | 187 | if self.mapfind_count > 8: |
|
188 | 188 | self.loadmap() |
|
189 | 189 | if node in self.map: |
|
190 | 190 | return node |
|
191 | 191 | return None |
|
192 | 192 | self.mapfind_count += 1 |
|
193 | 193 | last = self.l - 1 |
|
194 | 194 | while self.index[last] != None: |
|
195 | 195 | if last == 0: |
|
196 | 196 | self.all = 1 |
|
197 | 197 | self.allmap = 1 |
|
198 | 198 | return None |
|
199 | 199 | last -= 1 |
|
200 | 200 | end = (last + 1) * self.s |
|
201 | 201 | blocksize = self.s * 256 |
|
202 | 202 | while end >= 0: |
|
203 | 203 | start = max(end - blocksize, 0) |
|
204 | 204 | self.dataf.seek(start) |
|
205 | 205 | data = self.dataf.read(end - start) |
|
206 | 206 | findend = end - start |
|
207 | 207 | while True: |
|
208 | 208 | # we're searching backwards, so we have to make sure |
|
209 | 209 | # we don't find a changeset where this node is a parent |
|
210 | 210 | off = data.find(node, 0, findend) |
|
211 | 211 | findend = off |
|
212 | 212 | if off >= 0: |
|
213 | 213 | i = off / self.s |
|
214 | 214 | off = i * self.s |
|
215 | 215 | n = data[off + ngshaoffset:off + ngshaoffset + 20] |
|
216 | 216 | if n == node: |
|
217 | 217 | self.map[n] = i + start / self.s |
|
218 | 218 | return node |
|
219 | 219 | else: |
|
220 | 220 | break |
|
221 | 221 | end -= blocksize |
|
222 | 222 | return None |
|
223 | 223 | |
|
224 | 224 | def loadindex(self, i=None, end=None): |
|
225 | 225 | if self.all: |
|
226 | 226 | return |
|
227 | 227 | all = False |
|
228 | 228 | if i == None: |
|
229 | 229 | blockstart = 0 |
|
230 | 230 | blocksize = (65536 / self.s) * self.s |
|
231 | 231 | end = self.datasize |
|
232 | 232 | all = True |
|
233 | 233 | else: |
|
234 | 234 | if end: |
|
235 | 235 | blockstart = i * self.s |
|
236 | 236 | end = end * self.s |
|
237 | 237 | blocksize = end - blockstart |
|
238 | 238 | else: |
|
239 | 239 | blockstart = (i & ~1023) * self.s |
|
240 | 240 | blocksize = self.s * 1024 |
|
241 | 241 | end = blockstart + blocksize |
|
242 | 242 | while blockstart < end: |
|
243 | 243 | self.loadblock(blockstart, blocksize) |
|
244 | 244 | blockstart += blocksize |
|
245 | 245 | if all: |
|
246 | 246 | self.all = True |
|
247 | 247 | |
|
248 | 248 | class lazyindex(object): |
|
249 | 249 | """a lazy version of the index array""" |
|
250 | 250 | def __init__(self, parser): |
|
251 | 251 | self.p = parser |
|
252 | 252 | def __len__(self): |
|
253 | 253 | return len(self.p.index) |
|
254 | 254 | def load(self, pos): |
|
255 | 255 | if pos < 0: |
|
256 | 256 | pos += len(self.p.index) |
|
257 | 257 | self.p.loadindex(pos) |
|
258 | 258 | return self.p.index[pos] |
|
259 | 259 | def __getitem__(self, pos): |
|
260 | 260 | return _unpack(indexformatng, self.p.index[pos] or self.load(pos)) |
|
261 | 261 | def __setitem__(self, pos, item): |
|
262 | 262 | self.p.index[pos] = _pack(indexformatng, *item) |
|
263 | 263 | def __delitem__(self, pos): |
|
264 | 264 | del self.p.index[pos] |
|
265 | 265 | def insert(self, pos, e): |
|
266 | 266 | self.p.index.insert(pos, _pack(indexformatng, *e)) |
|
267 | 267 | def append(self, e): |
|
268 | 268 | self.p.index.append(_pack(indexformatng, *e)) |
|
269 | 269 | |
|
270 | 270 | class lazymap(object): |
|
271 | 271 | """a lazy version of the node map""" |
|
272 | 272 | def __init__(self, parser): |
|
273 | 273 | self.p = parser |
|
274 | 274 | def load(self, key): |
|
275 | 275 | n = self.p.findnode(key) |
|
276 | 276 | if n == None: |
|
277 | 277 | raise KeyError(key) |
|
278 | 278 | def __contains__(self, key): |
|
279 | 279 | if key in self.p.map: |
|
280 | 280 | return True |
|
281 | 281 | self.p.loadmap() |
|
282 | 282 | return key in self.p.map |
|
283 | 283 | def __iter__(self): |
|
284 | 284 | yield nullid |
|
285 | 285 | for i in xrange(self.p.l): |
|
286 | 286 | ret = self.p.index[i] |
|
287 | 287 | if not ret: |
|
288 | 288 | self.p.loadindex(i) |
|
289 | 289 | ret = self.p.index[i] |
|
290 | 290 | if isinstance(ret, str): |
|
291 | 291 | ret = _unpack(indexformatng, ret) |
|
292 | 292 | yield ret[7] |
|
293 | 293 | def __getitem__(self, key): |
|
294 | 294 | try: |
|
295 | 295 | return self.p.map[key] |
|
296 | 296 | except KeyError: |
|
297 | 297 | try: |
|
298 | 298 | self.load(key) |
|
299 | 299 | return self.p.map[key] |
|
300 | 300 | except KeyError: |
|
301 | 301 | raise KeyError("node " + hex(key)) |
|
302 | 302 | def __setitem__(self, key, val): |
|
303 | 303 | self.p.map[key] = val |
|
304 | 304 | def __delitem__(self, key): |
|
305 | 305 | del self.p.map[key] |
|
306 | 306 | |
|
307 | 307 | indexformatv0 = ">4l20s20s20s" |
|
308 | 308 | v0shaoffset = 56 |
|
309 | 309 | |
|
310 | 310 | class revlogoldio(object): |
|
311 | 311 | def __init__(self): |
|
312 | 312 | self.size = struct.calcsize(indexformatv0) |
|
313 | 313 | |
|
314 | 314 | def parseindex(self, fp, inline): |
|
315 | 315 | s = self.size |
|
316 | 316 | index = [] |
|
317 | 317 | nodemap = {nullid: nullrev} |
|
318 | 318 | n = off = 0 |
|
319 | 319 | data = fp.read() |
|
320 | 320 | l = len(data) |
|
321 | 321 | while off + s <= l: |
|
322 | 322 | cur = data[off:off + s] |
|
323 | 323 | off += s |
|
324 | 324 | e = _unpack(indexformatv0, cur) |
|
325 | 325 | # transform to revlogv1 format |
|
326 | 326 | e2 = (offset_type(e[0], 0), e[1], -1, e[2], e[3], |
|
327 | 327 | nodemap.get(e[4], nullrev), nodemap.get(e[5], nullrev), e[6]) |
|
328 | 328 | index.append(e2) |
|
329 | 329 | nodemap[e[6]] = n |
|
330 | 330 | n += 1 |
|
331 | 331 | |
|
332 | 332 | return index, nodemap, None |
|
333 | 333 | |
|
334 | 334 | def packentry(self, entry, node, version, rev): |
|
335 | 335 | e2 = (getoffset(entry[0]), entry[1], entry[3], entry[4], |
|
336 | 336 | node(entry[5]), node(entry[6]), entry[7]) |
|
337 | 337 | return _pack(indexformatv0, *e2) |
|
338 | 338 | |
|
339 | 339 | # index ng: |
|
340 | 340 | # 6 bytes offset |
|
341 | 341 | # 2 bytes flags |
|
342 | 342 | # 4 bytes compressed length |
|
343 | 343 | # 4 bytes uncompressed length |
|
344 | 344 | # 4 bytes: base rev |
|
345 | 345 | # 4 bytes link rev |
|
346 | 346 | # 4 bytes parent 1 rev |
|
347 | 347 | # 4 bytes parent 2 rev |
|
348 | 348 | # 32 bytes: nodeid |
|
349 | 349 | indexformatng = ">Qiiiiii20s12x" |
|
350 | 350 | ngshaoffset = 32 |
|
351 | 351 | versionformat = ">I" |
|
352 | 352 | |
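
Packed this way, each RevlogNG entry is a fixed 64 bytes (8-byte offset/flags word, six 4-byte ints, 20-byte node, 12 bytes of padding), and the node always sits at byte 32, which is what ngshaoffset records and lazyparser relies on when scanning raw blocks. A quick sanity check with a made-up entry:

    import struct

    assert struct.calcsize(indexformatng) == 64

    # the 20-byte node lands exactly at ngshaoffset
    e = (offset_type(0, 0), 10, 12, 0, 0, -1, -1, '\xaa' * 20)
    packed = struct.pack(indexformatng, *e)
    assert packed[ngshaoffset:ngshaoffset + 20] == '\xaa' * 20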
|
353 | 353 | class revlogio(object): |
|
354 | 354 | def __init__(self): |
|
355 | 355 | self.size = struct.calcsize(indexformatng) |
|
356 | 356 | |
|
357 | 357 | def parseindex(self, fp, inline): |
|
358 | 358 | try: |
|
359 | 359 | size = util.fstat(fp).st_size |
|
360 | 360 | except AttributeError: |
|
361 | 361 | size = 0 |
|
362 | 362 | |
|
363 | 363 | if util.openhardlinks() and not inline and size > 1000000: |
|
364 | 364 | # big index, let's parse it on demand |
|
365 | 365 | parser = lazyparser(fp, size) |
|
366 | 366 | index = lazyindex(parser) |
|
367 | 367 | nodemap = lazymap(parser) |
|
368 | 368 | e = list(index[0]) |
|
369 | 369 | type = gettype(e[0]) |
|
370 | 370 | e[0] = offset_type(0, type) |
|
371 | 371 | index[0] = e |
|
372 | 372 | return index, nodemap, None |
|
373 | 373 | |
|
374 | 374 | s = self.size |
|
375 | 375 | cache = None |
|
376 | 376 | index = [] |
|
377 | 377 | nodemap = {nullid: nullrev} |
|
378 | 378 | n = off = 0 |
|
379 | 379 | # if we're not using lazymap, always read the whole index |
|
380 | 380 | data = fp.read() |
|
381 | 381 | l = len(data) - s |
|
382 | 382 | append = index.append |
|
383 | 383 | if inline: |
|
384 | 384 | cache = (0, data) |
|
385 | 385 | while off <= l: |
|
386 | 386 | e = _unpack(indexformatng, data[off:off + s]) |
|
387 | 387 | nodemap[e[7]] = n |
|
388 | 388 | append(e) |
|
389 | 389 | n += 1 |
|
390 | 390 | if e[1] < 0: |
|
391 | 391 | break |
|
392 | 392 | off += e[1] + s |
|
393 | 393 | else: |
|
394 | 394 | while off <= l: |
|
395 | 395 | e = _unpack(indexformatng, data[off:off + s]) |
|
396 | 396 | nodemap[e[7]] = n |
|
397 | 397 | append(e) |
|
398 | 398 | n += 1 |
|
399 | 399 | off += s |
|
400 | 400 | |
|
401 | 401 | e = list(index[0]) |
|
402 | 402 | type = gettype(e[0]) |
|
403 | 403 | e[0] = offset_type(0, type) |
|
404 | 404 | index[0] = e |
|
405 | 405 | |
|
406 | 406 | return index, nodemap, cache |
|
407 | 407 | |
|
408 | 408 | def packentry(self, entry, node, version, rev): |
|
409 | 409 | p = _pack(indexformatng, *entry) |
|
410 | 410 | if rev == 0: |
|
411 | 411 | p = _pack(versionformat, version) + p[4:] |
|
412 | 412 | return p |
|
413 | 413 | |
|
414 | 414 | class revlog(object): |
|
415 | 415 | """ |
|
416 | 416 | the underlying revision storage object |
|
417 | 417 | |
|
418 | 418 | A revlog consists of two parts, an index and the revision data. |
|
419 | 419 | |
|
420 | 420 | The index is a file with a fixed record size containing |
|
421 | 421 | information on each revision, including its nodeid (hash), the |
|
422 | 422 | nodeids of its parents, the position and offset of its data within |
|
423 | 423 | the data file, and the revision it's based on. Finally, each entry |
|
424 | 424 | contains a linkrev entry that can serve as a pointer to external |
|
425 | 425 | data. |
|
426 | 426 | |
|
427 | 427 | The revision data itself is a linear collection of data chunks. |
|
428 | 428 | Each chunk represents a revision and is usually represented as a |
|
429 | 429 | delta against the previous chunk. To bound lookup time, runs of |
|
430 | 430 | deltas are limited to about 2 times the length of the original |
|
431 | 431 | version data. This makes retrieval of a version proportional to |
|
432 | 432 | its size, or O(1) relative to the number of revisions. |
|
433 | 433 | |
|
434 | 434 | Both pieces of the revlog are written to in an append-only |
|
435 | 435 | fashion, which means we never need to rewrite a file to insert or |
|
436 | 436 | remove data, and can use some simple techniques to avoid the need |
|
437 | 437 | for locking while reading. |
|
438 | 438 | """ |
|
439 | 439 | def __init__(self, opener, indexfile): |
|
440 | 440 | """ |
|
441 | 441 | create a revlog object |
|
442 | 442 | |
|
443 | 443 | opener is a function that abstracts the file opening operation |
|
444 | 444 | and can be used to implement COW semantics or the like. |
|
445 | 445 | """ |
|
446 | 446 | self.indexfile = indexfile |
|
447 | 447 | self.datafile = indexfile[:-2] + ".d" |
|
448 | 448 | self.opener = opener |
|
449 | 449 | self._cache = None |
|
450 | 450 | self._chunkcache = None |
|
451 | 451 | self.nodemap = {nullid: nullrev} |
|
452 | 452 | self.index = [] |
|
453 | 453 | |
|
454 | 454 | v = REVLOG_DEFAULT_VERSION |
|
455 | 455 | if hasattr(opener, "defversion"): |
|
456 | 456 | v = opener.defversion |
|
457 | 457 | if v & REVLOGNG: |
|
458 | 458 | v |= REVLOGNGINLINEDATA |
|
459 | 459 | |
|
460 | 460 | i = "" |
|
461 | 461 | try: |
|
462 | 462 | f = self.opener(self.indexfile) |
|
463 | 463 | i = f.read(4) |
|
464 | 464 | f.seek(0) |
|
465 | 465 | if len(i) > 0: |
|
466 | 466 | v = struct.unpack(versionformat, i)[0] |
|
467 | 467 | except IOError, inst: |
|
468 | 468 | if inst.errno != errno.ENOENT: |
|
469 | 469 | raise |
|
470 | 470 | |
|
471 | 471 | self.version = v |
|
472 | 472 | self._inline = v & REVLOGNGINLINEDATA |
|
473 | 473 | flags = v & ~0xFFFF |
|
474 | 474 | fmt = v & 0xFFFF |
|
475 | 475 | if fmt == REVLOGV0 and flags: |
|
476 | 476 | raise RevlogError(_("index %s unknown flags %#04x for format v0") |
|
477 | 477 | % (self.indexfile, flags >> 16)) |
|
478 | 478 | elif fmt == REVLOGNG and flags & ~REVLOGNGINLINEDATA: |
|
479 | 479 | raise RevlogError(_("index %s unknown flags %#04x for revlogng") |
|
480 | 480 | % (self.indexfile, flags >> 16)) |
|
481 | 481 | elif fmt > REVLOGNG: |
|
482 | 482 | raise RevlogError(_("index %s unknown format %d") |
|
483 | 483 | % (self.indexfile, fmt)) |
|
484 | 484 | |
|
485 | 485 | self._io = revlogio() |
|
486 | 486 | if self.version == REVLOGV0: |
|
487 | 487 | self._io = revlogoldio() |
|
488 | 488 | if i: |
|
489 | 489 | d = self._io.parseindex(f, self._inline) |
|
490 | 490 | self.index, self.nodemap, self._chunkcache = d |
|
491 | 491 | |
|
492 | 492 | # add the magic null revision at -1 |
|
493 | 493 | self.index.append((0, 0, 0, -1, -1, -1, -1, nullid)) |
|
494 | 494 | |
|
495 | 495 | def _loadindex(self, start, end): |
|
496 | 496 | """load a block of indexes all at once from the lazy parser""" |
|
497 | 497 | if isinstance(self.index, lazyindex): |
|
498 | 498 | self.index.p.loadindex(start, end) |
|
499 | 499 | |
|
500 | 500 | def _loadindexmap(self): |
|
501 | 501 | """loads both the map and the index from the lazy parser""" |
|
502 | 502 | if isinstance(self.index, lazyindex): |
|
503 | 503 | p = self.index.p |
|
504 | 504 | p.loadindex() |
|
505 | 505 | self.nodemap = p.map |
|
506 | 506 | |
|
507 | 507 | def _loadmap(self): |
|
508 | 508 | """loads the map from the lazy parser""" |
|
509 | 509 | if isinstance(self.nodemap, lazymap): |
|
510 | 510 | self.nodemap.p.loadmap() |
|
511 | 511 | self.nodemap = self.nodemap.p.map |
|
512 | 512 | |
|
513 | 513 | def tip(self): |
|
514 | 514 | return self.node(len(self.index) - 2) |
|
515 | 515 | def count(self): |
|
516 | 516 | return len(self.index) - 1 |
|
517 | 517 | |
|
518 | 518 | def rev(self, node): |
|
519 | 519 | try: |
|
520 | 520 | return self.nodemap[node] |
|
521 | 521 | except KeyError: |
|
522 | raise LookupError(node) | |

522 | raise LookupError(node, self.indexfile, _('no node')) | |
|
523 | 523 | def node(self, rev): |
|
524 | 524 | return self.index[rev][7] |
|
525 | 525 | def linkrev(self, node): |
|
526 | 526 | return self.index[self.rev(node)][4] |
|
527 | 527 | def parents(self, node): |
|
528 | 528 | d = self.index[self.rev(node)][5:7] |
|
529 | 529 | return (self.node(d[0]), self.node(d[1])) |
|
530 | 530 | def parentrevs(self, rev): |
|
531 | 531 | return self.index[rev][5:7] |
|
532 | 532 | def start(self, rev): |
|
533 | 533 | return int(self.index[rev][0] >> 16) |
|
534 | 534 | def end(self, rev): |
|
535 | 535 | return self.start(rev) + self.length(rev) |
|
536 | 536 | def length(self, rev): |
|
537 | 537 | return self.index[rev][1] |
|
538 | 538 | def base(self, rev): |
|
539 | 539 | return self.index[rev][3] |
|
540 | 540 | |
|
541 | 541 | def size(self, rev): |
|
542 | 542 | """return the length of the uncompressed text for a given revision""" |
|
543 | 543 | l = self.index[rev][2] |
|
544 | 544 | if l >= 0: |
|
545 | 545 | return l |
|
546 | 546 | |
|
547 | 547 | t = self.revision(self.node(rev)) |
|
548 | 548 | return len(t) |
|
549 | 549 | |
|
550 | 550 | # alternate implementation. The advantage of this code is that it |
|
551 | 551 | # will be faster for a single revision. But, the results are not |
|
552 | 552 | # cached, so finding the size of every revision will be slower. |
|
553 | 553 | """ |
|
554 | 554 | if self.cache and self.cache[1] == rev: |
|
555 | 555 | return len(self.cache[2]) |
|
556 | 556 | |
|
557 | 557 | base = self.base(rev) |
|
558 | 558 | if self.cache and self.cache[1] >= base and self.cache[1] < rev: |
|
559 | 559 | base = self.cache[1] |
|
560 | 560 | text = self.cache[2] |
|
561 | 561 | else: |
|
562 | 562 | text = self.revision(self.node(base)) |
|
563 | 563 | |
|
564 | 564 | l = len(text) |
|
565 | 565 | for x in xrange(base + 1, rev + 1): |
|
566 | 566 | l = mdiff.patchedsize(l, self.chunk(x)) |
|
567 | 567 | return l |
|
568 | 568 | """ |
|
569 | 569 | |
|
570 | 570 | def reachable(self, node, stop=None): |
|
571 | 571 | """return a hash of all nodes ancestral to a given node, including |
|
572 | 572 | the node itself, stopping when stop is matched""" |
|
573 | 573 | reachable = {} |
|
574 | 574 | visit = [node] |
|
575 | 575 | reachable[node] = 1 |
|
576 | 576 | if stop: |
|
577 | 577 | stopn = self.rev(stop) |
|
578 | 578 | else: |
|
579 | 579 | stopn = 0 |
|
580 | 580 | while visit: |
|
581 | 581 | n = visit.pop(0) |
|
582 | 582 | if n == stop: |
|
583 | 583 | continue |
|
584 | 584 | if n == nullid: |
|
585 | 585 | continue |
|
586 | 586 | for p in self.parents(n): |
|
587 | 587 | if self.rev(p) < stopn: |
|
588 | 588 | continue |
|
589 | 589 | if p not in reachable: |
|
590 | 590 | reachable[p] = 1 |
|
591 | 591 | visit.append(p) |
|
592 | 592 | return reachable |
|
593 | 593 | |
|
594 | 594 | def nodesbetween(self, roots=None, heads=None): |
|
595 | 595 | """Return a tuple containing three elements. Elements 1 and 2 contain |
|
596 | 596 | a final list of bases and heads after all the unreachable ones have been |
|
597 | 597 | pruned. Element 0 contains a topologically sorted list of all |
|
598 | 598 | |
|
599 | 599 | nodes that satisfy these constraints: |
|
600 | 600 | 1. All nodes must be descended from a node in roots (the nodes on |
|
601 | 601 | roots are considered descended from themselves). |
|
602 | 602 | 2. All nodes must also be ancestors of a node in heads (the nodes in |
|
603 | 603 | heads are considered to be their own ancestors). |
|
604 | 604 | |
|
605 | 605 | If roots is unspecified, nullid is assumed as the only root. |
|
606 | 606 | If heads is unspecified, it is taken to be the output of the |
|
607 | 607 | heads method (i.e. a list of all nodes in the repository that |
|
608 | 608 | have no children).""" |
|
609 | 609 | nonodes = ([], [], []) |
|
610 | 610 | if roots is not None: |
|
611 | 611 | roots = list(roots) |
|
612 | 612 | if not roots: |
|
613 | 613 | return nonodes |
|
614 | 614 | lowestrev = min([self.rev(n) for n in roots]) |
|
615 | 615 | else: |
|
616 | 616 | roots = [nullid] # Everybody's a descendent of nullid |
|
617 | 617 | lowestrev = nullrev |
|
618 | 618 | if (lowestrev == nullrev) and (heads is None): |
|
619 | 619 | # We want _all_ the nodes! |
|
620 | 620 | return ([self.node(r) for r in xrange(0, self.count())], |
|
621 | 621 | [nullid], list(self.heads())) |
|
622 | 622 | if heads is None: |
|
623 | 623 | # All nodes are ancestors, so the latest ancestor is the last |
|
624 | 624 | # node. |
|
625 | 625 | highestrev = self.count() - 1 |
|
626 | 626 | # Set ancestors to None to signal that every node is an ancestor. |
|
627 | 627 | ancestors = None |
|
628 | 628 | # Set heads to an empty dictionary for later discovery of heads |
|
629 | 629 | heads = {} |
|
630 | 630 | else: |
|
631 | 631 | heads = list(heads) |
|
632 | 632 | if not heads: |
|
633 | 633 | return nonodes |
|
634 | 634 | ancestors = {} |
|
635 | 635 | # Turn heads into a dictionary so we can remove 'fake' heads. |
|
636 | 636 | # Also, later we will be using it to filter out the heads we can't |
|
637 | 637 | # find from roots. |
|
638 | 638 | heads = dict.fromkeys(heads, 0) |
|
639 | 639 | # Start at the top and keep marking parents until we're done. |
|
640 | 640 | nodestotag = heads.keys() |
|
641 | 641 | # Remember where the top was so we can use it as a limit later. |
|
642 | 642 | highestrev = max([self.rev(n) for n in nodestotag]) |
|
643 | 643 | while nodestotag: |
|
644 | 644 | # grab a node to tag |
|
645 | 645 | n = nodestotag.pop() |
|
646 | 646 | # Never tag nullid |
|
647 | 647 | if n == nullid: |
|
648 | 648 | continue |
|
649 | 649 | # A node's revision number represents its place in a |
|
650 | 650 | # topologically sorted list of nodes. |
|
651 | 651 | r = self.rev(n) |
|
652 | 652 | if r >= lowestrev: |
|
653 | 653 | if n not in ancestors: |
|
654 | 654 | # If we are possibly a descendent of one of the roots |
|
655 | 655 | # and we haven't already been marked as an ancestor |
|
656 | 656 | ancestors[n] = 1 # Mark as ancestor |
|
657 | 657 | # Add non-nullid parents to list of nodes to tag. |
|
658 | 658 | nodestotag.extend([p for p in self.parents(n) if |
|
659 | 659 | p != nullid]) |
|
660 | 660 | elif n in heads: # We've seen it before, is it a fake head? |
|
661 | 661 | # So it is, real heads should not be the ancestors of |
|
662 | 662 | # any other heads. |
|
663 | 663 | heads.pop(n) |
|
664 | 664 | if not ancestors: |
|
665 | 665 | return nonodes |
|
666 | 666 | # Now that we have our set of ancestors, we want to remove any |
|
667 | 667 | # roots that are not ancestors. |
|
668 | 668 | |
|
669 | 669 | # If one of the roots was nullid, everything is included anyway. |
|
670 | 670 | if lowestrev > nullrev: |
|
671 | 671 | # But, since we weren't, let's recompute the lowest rev to not |
|
672 | 672 | # include roots that aren't ancestors. |
|
673 | 673 | |
|
674 | 674 | # Filter out roots that aren't ancestors of heads |
|
675 | 675 | roots = [n for n in roots if n in ancestors] |
|
676 | 676 | # Recompute the lowest revision |
|
677 | 677 | if roots: |
|
678 | 678 | lowestrev = min([self.rev(n) for n in roots]) |
|
679 | 679 | else: |
|
680 | 680 | # No more roots? Return empty list |
|
681 | 681 | return nonodes |
|
682 | 682 | else: |
|
683 | 683 | # We are descending from nullid, and don't need to care about |
|
684 | 684 | # any other roots. |
|
685 | 685 | lowestrev = nullrev |
|
686 | 686 | roots = [nullid] |
|
687 | 687 | # Transform our roots list into a 'set' (i.e. a dictionary where the |
|
688 | 688 | # values don't matter). |
|
689 | 689 | descendents = dict.fromkeys(roots, 1) |
|
690 | 690 | # Also, keep the original roots so we can filter out roots that aren't |
|
691 | 691 | # 'real' roots (i.e. are descended from other roots). |
|
692 | 692 | roots = descendents.copy() |
|
693 | 693 | # Our topologically sorted list of output nodes. |
|
694 | 694 | orderedout = [] |
|
695 | 695 | # Don't start at nullid since we don't want nullid in our output list, |
|
696 | 696 | # and if nullid shows up in descendents, empty parents will look like |
|
697 | 697 | # they're descendents. |
|
698 | 698 | for r in xrange(max(lowestrev, 0), highestrev + 1): |
|
699 | 699 | n = self.node(r) |
|
700 | 700 | isdescendent = False |
|
701 | 701 | if lowestrev == nullrev: # Everybody is a descendent of nullid |
|
702 | 702 | isdescendent = True |
|
703 | 703 | elif n in descendents: |
|
704 | 704 | # n is already a descendent |
|
705 | 705 | isdescendent = True |
|
706 | 706 | # This check only needs to be done here because all the roots |
|
707 | 707 | # will start being marked as descendents before the loop. |
|
708 | 708 | if n in roots: |
|
709 | 709 | # If n was a root, check if it's a 'real' root. |
|
710 | 710 | p = tuple(self.parents(n)) |
|
711 | 711 | # If any of its parents are descendents, it's not a root. |
|
712 | 712 | if (p[0] in descendents) or (p[1] in descendents): |
|
713 | 713 | roots.pop(n) |
|
714 | 714 | else: |
|
715 | 715 | p = tuple(self.parents(n)) |
|
716 | 716 | # A node is a descendent if either of its parents are |
|
717 | 717 | # descendents. (We seeded the descendents list with the roots |
|
718 | 718 | # up there, remember?) |
|
719 | 719 | if (p[0] in descendents) or (p[1] in descendents): |
|
720 | 720 | descendents[n] = 1 |
|
721 | 721 | isdescendent = True |
|
722 | 722 | if isdescendent and ((ancestors is None) or (n in ancestors)): |
|
723 | 723 | # Only include nodes that are both descendents and ancestors. |
|
724 | 724 | orderedout.append(n) |
|
725 | 725 | if (ancestors is not None) and (n in heads): |
|
726 | 726 | # We're trying to figure out which heads are reachable |
|
727 | 727 | # from roots. |
|
728 | 728 | # Mark this head as having been reached |
|
729 | 729 | heads[n] = 1 |
|
730 | 730 | elif ancestors is None: |
|
731 | 731 | # Otherwise, we're trying to discover the heads. |
|
732 | 732 | # Assume this is a head because if it isn't, the next step |
|
733 | 733 | # will eventually remove it. |
|
734 | 734 | heads[n] = 1 |
|
735 | 735 | # But, obviously its parents aren't. |
|
736 | 736 | for p in self.parents(n): |
|
737 | 737 | heads.pop(p, None) |
|
738 | 738 | heads = [n for n in heads.iterkeys() if heads[n] != 0] |
|
739 | 739 | roots = roots.keys() |
|
740 | 740 | assert orderedout |
|
741 | 741 | assert roots |
|
742 | 742 | assert heads |
|
743 | 743 | return (orderedout, roots, heads) |
|
744 | 744 | |
|
745 | 745 | def heads(self, start=None, stop=None): |
|
746 | 746 | """return the list of all nodes that have no children |
|
747 | 747 | |
|
748 | 748 | if start is specified, only heads that are descendants of |
|
749 | 749 | start will be returned |
|
750 | 750 | if stop is specified, it will consider all the revs from stop |
|
751 | 751 | as if they had no children |
|
752 | 752 | """ |
|
753 | 753 | if start is None and stop is None: |
|
754 | 754 | count = self.count() |
|
755 | 755 | if not count: |
|
756 | 756 | return [nullid] |
|
757 | 757 | ishead = [1] * (count + 1) |
|
758 | 758 | index = self.index |
|
759 | 759 | for r in xrange(count): |
|
760 | 760 | e = index[r] |
|
761 | 761 | ishead[e[5]] = ishead[e[6]] = 0 |
|
762 | 762 | return [self.node(r) for r in xrange(count) if ishead[r]] |
|
763 | 763 | |
|
764 | 764 | if start is None: |
|
765 | 765 | start = nullid |
|
766 | 766 | if stop is None: |
|
767 | 767 | stop = [] |
|
768 | 768 | stoprevs = dict.fromkeys([self.rev(n) for n in stop]) |
|
769 | 769 | startrev = self.rev(start) |
|
770 | 770 | reachable = {startrev: 1} |
|
771 | 771 | heads = {startrev: 1} |
|
772 | 772 | |
|
773 | 773 | parentrevs = self.parentrevs |
|
774 | 774 | for r in xrange(startrev + 1, self.count()): |
|
775 | 775 | for p in parentrevs(r): |
|
776 | 776 | if p in reachable: |
|
777 | 777 | if r not in stoprevs: |
|
778 | 778 | reachable[r] = 1 |
|
779 | 779 | heads[r] = 1 |
|
780 | 780 | if p in heads and p not in stoprevs: |
|
781 | 781 | del heads[p] |
|
782 | 782 | |
|
783 | 783 | return [self.node(r) for r in heads] |
|
784 | 784 | |
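
The no-argument fast path above never walks ancestry: a single linear pass clears the ishead bit of every revision that occurs as a parent, so whatever is still set at the end has no children. The same marking trick on a toy parent table:

    # revs 0..3: 1 and 2 are children of 0, 3 is a child of 1
    parentrevs = {0: (-1, -1), 1: (0, -1), 2: (0, -1), 3: (1, -1)}
    ishead = [1] * 4
    for r in range(4):
        for p in parentrevs[r]:
            if p >= 0:
                ishead[p] = 0
    assert [r for r in range(4) if ishead[r]] == [2, 3]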
|
785 | 785 | def children(self, node): |
|
786 | 786 | """find the children of a given node""" |
|
787 | 787 | c = [] |
|
788 | 788 | p = self.rev(node) |
|
789 | 789 | for r in range(p + 1, self.count()): |
|
790 | 790 | prevs = [pr for pr in self.parentrevs(r) if pr != nullrev] |
|
791 | 791 | if prevs: |
|
792 | 792 | for pr in prevs: |
|
793 | 793 | if pr == p: |
|
794 | 794 | c.append(self.node(r)) |
|
795 | 795 | elif p == nullrev: |
|
796 | 796 | c.append(self.node(r)) |
|
797 | 797 | return c |
|
798 | 798 | |
|
799 | 799 | def _match(self, id): |
|
800 | 800 | if isinstance(id, (long, int)): |
|
801 | 801 | # rev |
|
802 | 802 | return self.node(id) |
|
803 | 803 | if len(id) == 20: |
|
804 | 804 | # possibly a binary node |
|
805 | 805 | # odds of a binary node being all hex in ASCII are 1 in 10**25 |
|
806 | 806 | try: |
|
807 | 807 | node = id |
|
808 | 808 | r = self.rev(node) # quick search the index |
|
809 | 809 | return node |
|
810 | 810 | except LookupError: |
|
811 | 811 | pass # may be partial hex id |
|
812 | 812 | try: |
|
813 | 813 | # str(rev) |
|
814 | 814 | rev = int(id) |
|
815 | 815 | if str(rev) != id: |
|
816 | 816 | raise ValueError |
|
817 | 817 | if rev < 0: |
|
818 | 818 | rev = self.count() + rev |
|
819 | 819 | if rev < 0 or rev >= self.count(): |
|
820 | 820 | raise ValueError |
|
821 | 821 | return self.node(rev) |
|
822 | 822 | except (ValueError, OverflowError): |
|
823 | 823 | pass |
|
824 | 824 | if len(id) == 40: |
|
825 | 825 | try: |
|
826 | 826 | # a full hex nodeid? |
|
827 | 827 | node = bin(id) |
|
828 | 828 | r = self.rev(node) |
|
829 | 829 | return node |
|
830 | 830 | except TypeError: |
|
831 | 831 | pass |
|
832 | 832 | |
|
833 | 833 | def _partialmatch(self, id): |
|
834 | 834 | if len(id) < 40: |
|
835 | 835 | try: |
|
836 | 836 | # hex(node)[:...] |
|
837 | 837 | bin_id = bin(id[:len(id) & ~1]) # grab an even number of digits |
|
838 | 838 | node = None |
|
839 | 839 | for n in self.nodemap: |
|
840 | 840 | if n.startswith(bin_id) and hex(n).startswith(id): |
|
841 | 841 | if node is not None: |
|
842 | raise LookupError(id, | |

843 | _('ambiguous identifier')) | |

842 | raise LookupError(id, self.indexfile, | |
|
843 | _('ambiguous identifier')) | |
|
844 | 844 | node = n |
|
845 | 845 | if node is not None: |
|
846 | 846 | return node |
|
847 | 847 | except TypeError: |
|
848 | 848 | pass |
|
849 | 849 | |
|
850 | 850 | def lookup(self, id): |
|
851 | 851 | """locate a node based on: |
|
852 | 852 | - revision number or str(revision number) |
|
853 | 853 | - nodeid or subset of hex nodeid |
|
854 | 854 | """ |
|
855 | 855 | n = self._match(id) |
|
856 | 856 | if n is not None: |
|
857 | 857 | return n |
|
858 | 858 | n = self._partialmatch(id) |
|
859 | 859 | if n: |
|
860 | 860 | return n |
|
861 | 861 | |
|
862 | raise LookupError(id, _('no match found')) | |

862 | raise LookupError(id, self.indexfile, _('no match found')) | |
|
863 | 863 | |
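
Since bin() only digests an even number of hex digits, _partialmatch truncates an odd-length prefix by one digit for the binary scan and then re-checks the full prefix against hex(n). A sketch of that two-step filter with a fabricated node, assuming the module's bin/hex imports:

    def matches(n, id):
        # even-length binary prefix first, full hex prefix second
        bin_id = bin(id[:len(id) & ~1])
        return n.startswith(bin_id) and hex(n).startswith(id)

    n = '\xac\x69\xc6\x58\x22\x9d' + '\x00' * 14
    assert matches(n, 'ac69c')      # odd-length prefix still matches
    assert not matches(n, 'ac70')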
|
864 | 864 | def cmp(self, node, text): |
|
865 | 865 | """compare text with a given file revision""" |
|
866 | 866 | p1, p2 = self.parents(node) |
|
867 | 867 | return hash(text, p1, p2) != node |
|
868 | 868 | |
|
869 | 869 | def chunk(self, rev, df=None): |
|
870 | 870 | def loadcache(df): |
|
871 | 871 | if not df: |
|
872 | 872 | if self._inline: |
|
873 | 873 | df = self.opener(self.indexfile) |
|
874 | 874 | else: |
|
875 | 875 | df = self.opener(self.datafile) |
|
876 | 876 | df.seek(start) |
|
877 | 877 | self._chunkcache = (start, df.read(cache_length)) |
|
878 | 878 | |
|
879 | 879 | start, length = self.start(rev), self.length(rev) |
|
880 | 880 | if self._inline: |
|
881 | 881 | start += (rev + 1) * self._io.size |
|
882 | 882 | end = start + length |
|
883 | 883 | |
|
884 | 884 | offset = 0 |
|
885 | 885 | if not self._chunkcache: |
|
886 | 886 | cache_length = max(65536, length) |
|
887 | 887 | loadcache(df) |
|
888 | 888 | else: |
|
889 | 889 | cache_start = self._chunkcache[0] |
|
890 | 890 | cache_length = len(self._chunkcache[1]) |
|
891 | 891 | cache_end = cache_start + cache_length |
|
892 | 892 | if start >= cache_start and end <= cache_end: |
|
893 | 893 | # it is cached |
|
894 | 894 | offset = start - cache_start |
|
895 | 895 | else: |
|
896 | 896 | cache_length = max(65536, length) |
|
897 | 897 | loadcache(df) |
|
898 | 898 | |
|
899 | 899 | # avoid copying large chunks |
|
900 | 900 | c = self._chunkcache[1] |
|
901 | 901 | if cache_length != length: |
|
902 | 902 | c = c[offset:offset + length] |
|
903 | 903 | |
|
904 | 904 | return decompress(c) |
|
905 | 905 | |
|
906 | 906 | def delta(self, node): |
|
907 | 907 | """return or calculate a delta between a node and its predecessor""" |
|
908 | 908 | r = self.rev(node) |
|
909 | 909 | return self.revdiff(r - 1, r) |
|
910 | 910 | |
|
911 | 911 | def revdiff(self, rev1, rev2): |
|
912 | 912 | """return or calculate a delta between two revisions""" |
|
913 | 913 | if rev1 + 1 == rev2 and self.base(rev1) == self.base(rev2): |
|
914 | 914 | return self.chunk(rev2) |
|
915 | 915 | |
|
916 | 916 | return mdiff.textdiff(self.revision(self.node(rev1)), |
|
917 | 917 | self.revision(self.node(rev2))) |
|
918 | 918 | |
|
919 | 919 | def revision(self, node): |
|
920 | 920 | """return an uncompressed revision of a given node""" |
|
921 | 921 | if node == nullid: |
|
922 | 922 | return "" |
|
923 | 923 | if self._cache and self._cache[0] == node: |
|
924 | 924 | return str(self._cache[2]) |
|
925 | 925 | |
|
926 | 926 | # look up what we need to read |
|
927 | 927 | text = None |
|
928 | 928 | rev = self.rev(node) |
|
929 | 929 | base = self.base(rev) |
|
930 | 930 | |
|
931 | 931 | # check rev flags |
|
932 | 932 | if self.index[rev][0] & 0xFFFF: |
|
933 | 933 | raise RevlogError(_('incompatible revision flag %x') % |
|
934 | 934 | (self.index[rev][0] & 0xFFFF)) |
|
935 | 935 | |
|
936 | 936 | df = None |
|
937 | 937 | |
|
938 | 938 | # do we have useful data cached? |
|
939 | 939 | if self._cache and self._cache[1] >= base and self._cache[1] < rev: |
|
940 | 940 | base = self._cache[1] |
|
941 | 941 | text = str(self._cache[2]) |
|
942 | 942 | self._loadindex(base, rev + 1) |
|
943 | 943 | if not self._inline and rev > base + 1: |
|
944 | 944 | df = self.opener(self.datafile) |
|
945 | 945 | else: |
|
946 | 946 | self._loadindex(base, rev + 1) |
|
947 | 947 | if not self._inline and rev > base: |
|
948 | 948 | df = self.opener(self.datafile) |
|
949 | 949 | text = self.chunk(base, df=df) |
|
950 | 950 | |
|
951 | 951 | bins = [self.chunk(r, df) for r in xrange(base + 1, rev + 1)] |
|
952 | 952 | text = mdiff.patches(text, bins) |
|
953 | 953 | p1, p2 = self.parents(node) |
|
954 | 954 | if node != hash(text, p1, p2): |
|
955 | 955 | raise RevlogError(_("integrity check failed on %s:%d") |
|
956 | 956 | % (self.datafile, rev)) |
|
957 | 957 | |
|
958 | 958 | self._cache = (node, rev, text) |
|
959 | 959 | return text |
|
960 | 960 | |
|
961 | 961 | def checkinlinesize(self, tr, fp=None): |
|
962 | 962 | if not self._inline: |
|
963 | 963 | return |
|
964 | 964 | if not fp: |
|
965 | 965 | fp = self.opener(self.indexfile, 'r') |
|
966 | 966 | fp.seek(0, 2) |
|
967 | 967 | size = fp.tell() |
|
968 | 968 | if size < 131072: |
|
969 | 969 | return |
|
970 | 970 | trinfo = tr.find(self.indexfile) |
|
971 | 971 | if trinfo == None: |
|
972 | 972 | raise RevlogError(_("%s not found in the transaction") |
|
973 | 973 | % self.indexfile) |
|
974 | 974 | |
|
975 | 975 | trindex = trinfo[2] |
|
976 | 976 | dataoff = self.start(trindex) |
|
977 | 977 | |
|
978 | 978 | tr.add(self.datafile, dataoff) |
|
979 | 979 | df = self.opener(self.datafile, 'w') |
|
980 | 980 | calc = self._io.size |
|
981 | 981 | for r in xrange(self.count()): |
|
982 | 982 | start = self.start(r) + (r + 1) * calc |
|
983 | 983 | length = self.length(r) |
|
984 | 984 | fp.seek(start) |
|
985 | 985 | d = fp.read(length) |
|
986 | 986 | df.write(d) |
|
987 | 987 | fp.close() |
|
988 | 988 | df.close() |
|
989 | 989 | fp = self.opener(self.indexfile, 'w', atomictemp=True) |
|
990 | 990 | self.version &= ~(REVLOGNGINLINEDATA) |
|
991 | 991 | self._inline = False |
|
992 | 992 | for i in xrange(self.count()): |
|
993 | 993 | e = self._io.packentry(self.index[i], self.node, self.version, i) |
|
994 | 994 | fp.write(e) |
|
995 | 995 | |
|
996 | 996 | # if we don't call rename, the temp file will never replace the |
|
997 | 997 | # real index |
|
998 | 998 | fp.rename() |
|
999 | 999 | |
|
1000 | 1000 | tr.replace(self.indexfile, trindex * calc) |
|
1001 | 1001 | self._chunkcache = None |
|
1002 | 1002 | |
|
1003 | 1003 | def addrevision(self, text, transaction, link, p1, p2, d=None): |
|
1004 | 1004 | """add a revision to the log |
|
1005 | 1005 | |
|
1006 | 1006 | text - the revision data to add |
|
1007 | 1007 | transaction - the transaction object used for rollback |
|
1008 | 1008 | link - the linkrev data to add |
|
1009 | 1009 | p1, p2 - the parent nodeids of the revision |
|
1010 | 1010 | d - an optional precomputed delta |
|
1011 | 1011 | """ |
|
1012 | 1012 | dfh = None |
|
1013 | 1013 | if not self._inline: |
|
1014 | 1014 | dfh = self.opener(self.datafile, "a") |
|
1015 | 1015 | ifh = self.opener(self.indexfile, "a+") |
|
1016 | 1016 | return self._addrevision(text, transaction, link, p1, p2, d, ifh, dfh) |
|
1017 | 1017 | |
|
1018 | 1018 | def _addrevision(self, text, transaction, link, p1, p2, d, ifh, dfh): |
|
1019 | 1019 | node = hash(text, p1, p2) |
|
1020 | 1020 | if node in self.nodemap: |
|
1021 | 1021 | return node |
|
1022 | 1022 | |
|
1023 | 1023 | curr = self.count() |
|
1024 | 1024 | prev = curr - 1 |
|
1025 | 1025 | base = self.base(prev) |
|
1026 | 1026 | offset = self.end(prev) |
|
1027 | 1027 | |
|
1028 | 1028 | if curr: |
|
1029 | 1029 | if not d: |
|
1030 | 1030 | ptext = self.revision(self.node(prev)) |
|
1031 | 1031 | d = mdiff.textdiff(ptext, text) |
|
1032 | 1032 | data = compress(d) |
|
1033 | 1033 | l = len(data[1]) + len(data[0]) |
|
1034 | 1034 | dist = l + offset - self.start(base) |
|
1035 | 1035 | |
|
1036 | 1036 | # full versions are inserted when the needed deltas |
|
1037 | 1037 | # become comparable to the uncompressed text |
|
1038 | 1038 | if not curr or dist > len(text) * 2: |
|
1039 | 1039 | data = compress(text) |
|
1040 | 1040 | l = len(data[1]) + len(data[0]) |
|
1041 | 1041 | base = curr |
|
1042 | 1042 | |
|
1043 | 1043 | e = (offset_type(offset, 0), l, len(text), |
|
1044 | 1044 | base, link, self.rev(p1), self.rev(p2), node) |
|
1045 | 1045 | self.index.insert(-1, e) |
|
1046 | 1046 | self.nodemap[node] = curr |
|
1047 | 1047 | |
|
1048 | 1048 | entry = self._io.packentry(e, self.node, self.version, curr) |
|
1049 | 1049 | if not self._inline: |
|
1050 | 1050 | transaction.add(self.datafile, offset) |
|
1051 | 1051 | transaction.add(self.indexfile, curr * len(entry)) |
|
1052 | 1052 | if data[0]: |
|
1053 | 1053 | dfh.write(data[0]) |
|
1054 | 1054 | dfh.write(data[1]) |
|
1055 | 1055 | dfh.flush() |
|
1056 | 1056 | ifh.write(entry) |
|
1057 | 1057 | else: |
|
1058 | 1058 | offset += curr * self._io.size |
|
1059 | 1059 | transaction.add(self.indexfile, offset, curr) |
|
1060 | 1060 | ifh.write(entry) |
|
1061 | 1061 | ifh.write(data[0]) |
|
1062 | 1062 | ifh.write(data[1]) |
|
1063 | 1063 | self.checkinlinesize(transaction, ifh) |
|
1064 | 1064 | |
|
1065 | 1065 | self._cache = (node, curr, text) |
|
1066 | 1066 | return node |
|
1067 | 1067 | |
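
The dist > len(text) * 2 test above is what enforces the retrieval bound promised in the class docstring: once the stored bytes between the chain base and the new delta would exceed twice the uncompressed size, a full snapshot is written and the chain restarts. Rough arithmetic with assumed sizes:

    # assumed: a 1000-byte text whose chain already spans 1900 stored
    # bytes since the last full version, plus a 200-byte new delta
    textlen, chainbytes, deltalen = 1000, 1900, 200
    dist = chainbytes + deltalen
    assert dist > textlen * 2   # 2100 > 2000: store a full revision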
|
1068 | 1068 | def ancestor(self, a, b): |
|
1069 | 1069 | """calculate the least common ancestor of nodes a and b""" |
|
1070 | 1070 | |
|
1071 | 1071 | def parents(rev): |
|
1072 | 1072 | return [p for p in self.parentrevs(rev) if p != nullrev] |
|
1073 | 1073 | |
|
1074 | 1074 | c = ancestor.ancestor(self.rev(a), self.rev(b), parents) |
|
1075 | 1075 | if c is None: |
|
1076 | 1076 | return nullid |
|
1077 | 1077 | |
|
1078 | 1078 | return self.node(c) |
|
1079 | 1079 | |
|
1080 | 1080 | def group(self, nodelist, lookup, infocollect=None): |
|
1081 | 1081 | """calculate a delta group |
|
1082 | 1082 | |
|
1083 | 1083 | Given a list of changeset revs, return a set of deltas and |
|
1084 | 1084 | metadata corresponding to nodes. the first delta is |
|
1085 | 1085 | parent(nodes[0]) -> nodes[0]; the receiver is guaranteed to |
|
1086 | 1086 | have this parent as it has all history before these |
|
1087 | 1087 | changesets. parent is parent[0] |
|
1088 | 1088 | """ |
|
1089 | 1089 | revs = [self.rev(n) for n in nodelist] |
|
1090 | 1090 | |
|
1091 | 1091 | # if we don't have any revisions touched by these changesets, bail |
|
1092 | 1092 | if not revs: |
|
1093 | 1093 | yield changegroup.closechunk() |
|
1094 | 1094 | return |
|
1095 | 1095 | |
|
1096 | 1096 | # add the parent of the first rev |
|
1097 | 1097 | p = self.parents(self.node(revs[0]))[0] |
|
1098 | 1098 | revs.insert(0, self.rev(p)) |
|
1099 | 1099 | |
|
1100 | 1100 | # build deltas |
|
1101 | 1101 | for d in xrange(0, len(revs) - 1): |
|
1102 | 1102 | a, b = revs[d], revs[d + 1] |
|
1103 | 1103 | nb = self.node(b) |
|
1104 | 1104 | |
|
1105 | 1105 | if infocollect is not None: |
|
1106 | 1106 | infocollect(nb) |
|
1107 | 1107 | |
|
1108 | 1108 | p = self.parents(nb) |
|
1109 | 1109 | meta = nb + p[0] + p[1] + lookup(nb) |
|
1110 | 1110 | if a == -1: |
|
1111 | 1111 | d = self.revision(nb) |
|
1112 | 1112 | meta += mdiff.trivialdiffheader(len(d)) |
|
1113 | 1113 | else: |
|
1114 | 1114 | d = self.revdiff(a, b) |
|
1115 | 1115 | yield changegroup.chunkheader(len(meta) + len(d)) |
|
1116 | 1116 | yield meta |
|
1117 | 1117 | if len(d) > 2**20: |
|
1118 | 1118 | pos = 0 |
|
1119 | 1119 | while pos < len(d): |
|
1120 | 1120 | pos2 = pos + 2 ** 18 |
|
1121 | 1121 | yield d[pos:pos2] |
|
1122 | 1122 | pos = pos2 |
|
1123 | 1123 | else: |
|
1124 | 1124 | yield d |
|
1125 | 1125 | |
|
1126 | 1126 | yield changegroup.closechunk() |
|
1127 | 1127 | |
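
Each entry that group() emits leads with 80 bytes of metadata, the four 20-byte nodes (node, p1, p2, linked changeset) that addgroup below unpacks with '20s20s20s20s'; the delta bytes follow immediately. A consumer-side sketch with dummy nodes:

    import struct

    def parse_chunk(chunk):
        # 80-byte header of four nodes, then the raw delta
        node, p1, p2, cs = struct.unpack('20s20s20s20s', chunk[:80])
        return node, p1, p2, cs, chunk[80:]

    meta = 'N' * 20 + 'A' * 20 + 'B' * 20 + 'C' * 20
    node, p1, p2, cs, delta = parse_chunk(meta + 'delta-bytes')
    assert node == 'N' * 20 and delta == 'delta-bytes'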
|
1128 | 1128 | def addgroup(self, revs, linkmapper, transaction, unique=0): |
|
1129 | 1129 | """ |
|
1130 | 1130 | add a delta group |
|
1131 | 1131 | |
|
1132 | 1132 | given a set of deltas, add them to the revision log. the |
|
1133 | 1133 | first delta is against its parent, which should be in our |
|
1134 | 1134 | log, the rest are against the previous delta. |
|
1135 | 1135 | """ |
|
1136 | 1136 | |
|
1137 | 1137 | #track the base of the current delta log |
|
1138 | 1138 | r = self.count() |
|
1139 | 1139 | t = r - 1 |
|
1140 | 1140 | node = None |
|
1141 | 1141 | |
|
1142 | 1142 | base = prev = nullrev |
|
1143 | 1143 | start = end = textlen = 0 |
|
1144 | 1144 | if r: |
|
1145 | 1145 | end = self.end(t) |
|
1146 | 1146 | |
|
1147 | 1147 | ifh = self.opener(self.indexfile, "a+") |
|
1148 | 1148 | isize = r * self._io.size |
|
1149 | 1149 | if self._inline: |
|
1150 | 1150 | transaction.add(self.indexfile, end + isize, r) |
|
1151 | 1151 | dfh = None |
|
1152 | 1152 | else: |
|
1153 | 1153 | transaction.add(self.indexfile, isize, r) |
|
1154 | 1154 | transaction.add(self.datafile, end) |
|
1155 | 1155 | dfh = self.opener(self.datafile, "a") |
|
1156 | 1156 | |
|
1157 | 1157 | # loop through our set of deltas |
|
1158 | 1158 | chain = None |
|
1159 | 1159 | for chunk in revs: |
|
1160 | 1160 | node, p1, p2, cs = struct.unpack("20s20s20s20s", chunk[:80]) |
|
1161 | 1161 | link = linkmapper(cs) |
|
1162 | 1162 | if node in self.nodemap: |
|
1163 | 1163 | # this can happen if two branches make the same change |
|
1164 | 1164 | # if unique: |
|
1165 | 1165 | # raise RevlogError(_("already have %s") % hex(node[:4])) |
|
1166 | 1166 | chain = node |
|
1167 | 1167 | continue |
|
1168 | 1168 | delta = buffer(chunk, 80) |
|
1169 | 1169 | del chunk |
|
1170 | 1170 | |
|
1171 | 1171 | for p in (p1, p2): |
|
1172 | 1172 | if not p in self.nodemap: |
|
1173 | raise LookupError(p, _('unknown parent %s') % short(p)) | |

1173 | raise LookupError(p, self.indexfile, _('unknown parent')) | |
|
1174 | 1174 | |
|
1175 | 1175 | if not chain: |
|
1176 | 1176 | # retrieve the parent revision of the delta chain |
|
1177 | 1177 | chain = p1 |
|
1178 | 1178 | if not chain in self.nodemap: |
|
1179 | raise LookupError(chain, _('unknown base %s') % short(chain[:4])) | |

1179 | raise LookupError(chain, self.indexfile, _('unknown base')) | |
|
1180 | 1180 | |
|
1181 | 1181 | # full versions are inserted when the needed deltas become |
|
1182 | 1182 | # comparable to the uncompressed text or when the previous |
|
1183 | 1183 | # version is not the one we have a delta against. We use |
|
1184 | 1184 | # the size of the previous full rev as a proxy for the |
|
1185 | 1185 | # current size. |
|
1186 | 1186 | |
|
1187 | 1187 | if chain == prev: |
|
1188 | 1188 | cdelta = compress(delta) |
|
1189 | 1189 | cdeltalen = len(cdelta[0]) + len(cdelta[1]) |
|
1190 | 1190 | textlen = mdiff.patchedsize(textlen, delta) |
|
1191 | 1191 | |
|
1192 | 1192 | if chain != prev or (end - start + cdeltalen) > textlen * 2: |
|
1193 | 1193 | # flush our writes here so we can read it in revision |
|
1194 | 1194 | if dfh: |
|
1195 | 1195 | dfh.flush() |
|
1196 | 1196 | ifh.flush() |
|
1197 | 1197 | text = self.revision(chain) |
|
1198 | 1198 | if len(text) == 0: |
|
1199 | 1199 | # skip over trivial delta header |
|
1200 | 1200 | text = buffer(delta, 12) |
|
1201 | 1201 | else: |
|
1202 | 1202 | text = mdiff.patches(text, [delta]) |
|
1203 | 1203 | del delta |
|
1204 | 1204 | chk = self._addrevision(text, transaction, link, p1, p2, None, |
|
1205 | 1205 | ifh, dfh) |
|
1206 | 1206 | if not dfh and not self._inline: |
|
1207 | 1207 | # addrevision switched from inline to conventional |
|
1208 | 1208 | # reopen the index |
|
1209 | 1209 | dfh = self.opener(self.datafile, "a") |
|
1210 | 1210 | ifh = self.opener(self.indexfile, "a") |
|
1211 | 1211 | if chk != node: |
|
1212 | 1212 | raise RevlogError(_("consistency error adding group")) |
|
1213 | 1213 | textlen = len(text) |
|
1214 | 1214 | else: |
|
1215 | 1215 | e = (offset_type(end, 0), cdeltalen, textlen, base, |
|
1216 | 1216 | link, self.rev(p1), self.rev(p2), node) |
|
1217 | 1217 | self.index.insert(-1, e) |
|
1218 | 1218 | self.nodemap[node] = r |
|
1219 | 1219 | entry = self._io.packentry(e, self.node, self.version, r) |
|
1220 | 1220 | if self._inline: |
|
1221 | 1221 | ifh.write(entry) |
|
1222 | 1222 | ifh.write(cdelta[0]) |
|
1223 | 1223 | ifh.write(cdelta[1]) |
|
1224 | 1224 | self.checkinlinesize(transaction, ifh) |
|
1225 | 1225 | if not self._inline: |
|
1226 | 1226 | dfh = self.opener(self.datafile, "a") |
|
1227 | 1227 | ifh = self.opener(self.indexfile, "a") |
|
1228 | 1228 | else: |
|
1229 | 1229 | dfh.write(cdelta[0]) |
|
1230 | 1230 | dfh.write(cdelta[1]) |
|
1231 | 1231 | ifh.write(entry) |
|
1232 | 1232 | |
|
1233 | 1233 | t, r, chain, prev = r, r + 1, node, node |
|
1234 | 1234 | base = self.base(t) |
|
1235 | 1235 | start = self.start(base) |
|
1236 | 1236 | end = self.end(t) |
|
1237 | 1237 | |
|
1238 | 1238 | return node |
|
1239 | 1239 | |
|
1240 | 1240 | def strip(self, minlink): |
|
1241 | 1241 | """truncate the revlog on the first revision with a linkrev >= minlink |
|
1242 | 1242 | |
|
1243 | 1243 | This function is called when we're stripping revision minlink and |
|
1244 | 1244 | its descendants from the repository. |
|
1245 | 1245 | |
|
1246 | 1246 | We have to remove all revisions with linkrev >= minlink, because |
|
1247 | 1247 | the equivalent changelog revisions will be renumbered after the |
|
1248 | 1248 | strip. |
|
1249 | 1249 | |
|
1250 | 1250 | So we truncate the revlog on the first of these revisions, and |
|
1251 | 1251 | trust that the caller has saved the revisions that shouldn't be |
|
1252 | 1252 | removed and that it'll readd them after this truncation. |
|
1253 | 1253 | """ |
|
1254 | 1254 | if self.count() == 0: |
|
1255 | 1255 | return |
|
1256 | 1256 | |
|
1257 | 1257 | if isinstance(self.index, lazyindex): |
|
1258 | 1258 | self._loadindexmap() |
|
1259 | 1259 | |
|
1260 | 1260 | for rev in xrange(0, self.count()): |
|
1261 | 1261 | if self.index[rev][4] >= minlink: |
|
1262 | 1262 | break |
|
1263 | 1263 | else: |
|
1264 | 1264 | return |
|
1265 | 1265 | |
|
1266 | 1266 | # first truncate the files on disk |
|
1267 | 1267 | end = self.start(rev) |
|
1268 | 1268 | if not self._inline: |
|
1269 | 1269 | df = self.opener(self.datafile, "a") |
|
1270 | 1270 | df.truncate(end) |
|
1271 | 1271 | end = rev * self._io.size |
|
1272 | 1272 | else: |
|
1273 | 1273 | end += rev * self._io.size |
|
1274 | 1274 | |
|
1275 | 1275 | indexf = self.opener(self.indexfile, "a") |
|
1276 | 1276 | indexf.truncate(end) |
|
1277 | 1277 | |
|
1278 | 1278 | # then reset internal state in memory to forget those revisions |
|
1279 | 1279 | self._cache = None |
|
1280 | 1280 | self._chunkcache = None |
|
1281 | 1281 | for x in xrange(rev, self.count()): |
|
1282 | 1282 | del self.nodemap[self.node(x)] |
|
1283 | 1283 | |
|
1284 | 1284 | del self.index[rev:-1] |
|
1285 | 1285 | |
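
For an inline revlog the data rides inside the index file, so the truncation point has to count one index entry per surviving revision on top of the data offset; with separate files the two cuts are independent. The arithmetic with assumed numbers:

    # assumed: first rev to drop is 10, entries are 64 bytes, and its
    # data begins at byte 5000 of the data file
    rev, iosize, datastart = 10, 64, 5000
    # separate .i/.d: cut data at byte 5000, the index after 640 bytes
    assert (datastart, rev * iosize) == (5000, 640)
    # inline: entries and data interleave in one file
    assert datastart + rev * iosize == 5640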
|
1286 | 1286 | def checksize(self): |
|
1287 | 1287 | expected = 0 |
|
1288 | 1288 | if self.count(): |
|
1289 | 1289 | expected = max(0, self.end(self.count() - 1)) |
|
1290 | 1290 | |
|
1291 | 1291 | try: |
|
1292 | 1292 | f = self.opener(self.datafile) |
|
1293 | 1293 | f.seek(0, 2) |
|
1294 | 1294 | actual = f.tell() |
|
1295 | 1295 | dd = actual - expected |
|
1296 | 1296 | except IOError, inst: |
|
1297 | 1297 | if inst.errno != errno.ENOENT: |
|
1298 | 1298 | raise |
|
1299 | 1299 | dd = 0 |
|
1300 | 1300 | |
|
1301 | 1301 | try: |
|
1302 | 1302 | f = self.opener(self.indexfile) |
|
1303 | 1303 | f.seek(0, 2) |
|
1304 | 1304 | actual = f.tell() |
|
1305 | 1305 | s = self._io.size |
|
1306 | 1306 | i = max(0, actual / s) |
|
1307 | 1307 | di = actual - (i * s) |
|
1308 | 1308 | if self._inline: |
|
1309 | 1309 | databytes = 0 |
|
1310 | 1310 | for r in xrange(self.count()): |
|
1311 | 1311 | databytes += max(0, self.length(r)) |
|
1312 | 1312 | dd = 0 |
|
1313 | 1313 | di = actual - self.count() * s - databytes |
|
1314 | 1314 | except IOError, inst: |
|
1315 | 1315 | if inst.errno != errno.ENOENT: |
|
1316 | 1316 | raise |
|
1317 | 1317 | di = 0 |
|
1318 | 1318 | |
|
1319 | 1319 | return (dd, di) |
@@ -1,232 +1,232 b''
|
1 | 1 | 1 files updated, 0 files merged, 0 files removed, 0 files unresolved |
|
2 | 2 | 1 files updated, 0 files merged, 2 files removed, 0 files unresolved |
|
3 | 3 | rev offset length base linkrev nodeid p1 p2 |
|
4 | 4 | 0 0 3 0 0 362fef284ce2 000000000000 000000000000 |
|
5 | 5 | 1 3 5 1 1 125144f7e028 362fef284ce2 000000000000 |
|
6 | 6 | 2 8 7 2 2 4c982badb186 125144f7e028 000000000000 |
|
7 | 7 | 3 15 9 3 3 19b1fc555737 4c982badb186 000000000000 |
|
8 | 8 | rev offset length base linkrev nodeid p1 p2 |
|
9 | 9 | 0 0 75 0 7 905359268f77 000000000000 000000000000 |
|
10 | 10 | rev offset length base linkrev nodeid p1 p2 |
|
11 | 11 | 0 0 75 0 8 905359268f77 000000000000 000000000000 |
|
12 | 12 | rev offset length base linkrev nodeid p1 p2 |
|
13 | 13 | 0 0 8 0 6 12ab3bcc5ea4 000000000000 000000000000 |
|
14 | 14 | rev offset length base linkrev nodeid p1 p2 |
|
15 | 15 | 0 0 48 0 0 43eadb1d2d06 000000000000 000000000000 |
|
16 | 16 | 1 48 48 1 1 8b89697eba2c 43eadb1d2d06 000000000000 |
|
17 | 17 | 2 96 48 2 2 626a32663c2f 8b89697eba2c 000000000000 |
|
18 | 18 | 3 144 48 3 3 f54c32f13478 626a32663c2f 000000000000 |
|
19 | 19 | 4 192 58 3 6 de68e904d169 626a32663c2f 000000000000 |
|
20 | 20 | 5 250 68 3 7 3b45cc2ab868 de68e904d169 000000000000 |
|
21 | 21 | 6 318 54 6 8 24d86153a002 f54c32f13478 000000000000 |
|
22 | 22 | checking changesets |
|
23 | 23 | checking manifests |
|
24 | 24 | crosschecking files in changesets and manifests |
|
25 | 25 | checking files |
|
26 | 26 | 4 files, 9 changesets, 7 total revisions |
|
27 | 27 | searching for changes |
|
28 | 28 | 1 changesets found |
|
29 | 29 | adding changesets |
|
30 | 30 | adding manifests |
|
31 | 31 | adding file changes |
|
32 | 32 | added 1 changesets with 1 changes to 1 files |
|
33 | 33 | (run 'hg update' to get a working copy) |
|
34 | 34 | checking changesets |
|
35 | 35 | checking manifests |
|
36 | 36 | crosschecking files in changesets and manifests |
|
37 | 37 | checking files |
|
38 | 38 | 1 files, 1 changesets, 1 total revisions |
|
39 | 39 | 0:5649c9d34dd8 |
|
40 | 40 | searching for changes |
|
41 | 41 | 2 changesets found |
|
42 | 42 | adding changesets |
|
43 | 43 | adding manifests |
|
44 | 44 | adding file changes |
|
45 | 45 | added 2 changesets with 2 changes to 1 files |
|
46 | 46 | (run 'hg update' to get a working copy) |
|
47 | 47 | checking changesets |
|
48 | 48 | checking manifests |
|
49 | 49 | crosschecking files in changesets and manifests |
|
50 | 50 | checking files |
|
51 | 51 | 1 files, 2 changesets, 2 total revisions |
|
52 | 52 | 1:10b2180f755b |
|
53 | 53 | searching for changes |
|
54 | 54 | 3 changesets found |
|
55 | 55 | adding changesets |
|
56 | 56 | adding manifests |
|
57 | 57 | adding file changes |
|
58 | 58 | added 3 changesets with 3 changes to 1 files |
|
59 | 59 | (run 'hg update' to get a working copy) |
|
60 | 60 | checking changesets |
|
61 | 61 | checking manifests |
|
62 | 62 | crosschecking files in changesets and manifests |
|
63 | 63 | checking files |
|
64 | 64 | 1 files, 3 changesets, 3 total revisions |
|
65 | 65 | 2:d62976ca1e50 |
|
66 | 66 | searching for changes |
|
67 | 67 | 4 changesets found |
|
68 | 68 | adding changesets |
|
69 | 69 | adding manifests |
|
70 | 70 | adding file changes |
|
71 | 71 | added 4 changesets with 4 changes to 1 files |
|
72 | 72 | (run 'hg update' to get a working copy) |
|
73 | 73 | checking changesets |
|
74 | 74 | checking manifests |
|
75 | 75 | crosschecking files in changesets and manifests |
|
76 | 76 | checking files |
|
77 | 77 | 1 files, 4 changesets, 4 total revisions |
|
78 | 78 | 3:ac69c658229d |
|
79 | 79 | searching for changes |
|
80 | 80 | 2 changesets found |
|
81 | 81 | adding changesets |
|
82 | 82 | adding manifests |
|
83 | 83 | adding file changes |
|
84 | 84 | added 2 changesets with 2 changes to 1 files |
|
85 | 85 | (run 'hg update' to get a working copy) |
|
86 | 86 | checking changesets |
|
87 | 87 | checking manifests |
|
88 | 88 | crosschecking files in changesets and manifests |
|
89 | 89 | checking files |
|
90 | 90 | 1 files, 2 changesets, 2 total revisions |
|
91 | 91 | 1:5f4f3ceb285e |
|
92 | 92 | searching for changes |
|
93 | 93 | 3 changesets found |
|
94 | 94 | adding changesets |
|
95 | 95 | adding manifests |
|
96 | 96 | adding file changes |
|
97 | 97 | added 3 changesets with 3 changes to 1 files |
|
98 | 98 | (run 'hg update' to get a working copy) |
|
99 | 99 | checking changesets |
|
100 | 100 | checking manifests |
|
101 | 101 | crosschecking files in changesets and manifests |
|
102 | 102 | checking files |
|
103 | 103 | 1 files, 3 changesets, 3 total revisions |
|
104 | 104 | 2:024e4e7df376 |
|
105 | 105 | searching for changes |
|
106 | 106 | 4 changesets found |
|
107 | 107 | adding changesets |
|
108 | 108 | adding manifests |
|
109 | 109 | adding file changes |
|
110 | 110 | added 4 changesets with 5 changes to 2 files |
|
111 | 111 | (run 'hg update' to get a working copy) |
|
112 | 112 | checking changesets |
|
113 | 113 | checking manifests |
|
114 | 114 | crosschecking files in changesets and manifests |
|
115 | 115 | checking files |
|
116 | 116 | 2 files, 4 changesets, 5 total revisions |
|
117 | 117 | 3:1e3f6b843bd6 |
|
118 | 118 | searching for changes |
|
119 | 119 | 5 changesets found |
|
120 | 120 | adding changesets |
|
121 | 121 | adding manifests |
|
122 | 122 | adding file changes |
|
123 | 123 | added 5 changesets with 6 changes to 3 files |
|
124 | 124 | (run 'hg update' to get a working copy) |
|
125 | 125 | checking changesets |
|
126 | 126 | checking manifests |
|
127 | 127 | crosschecking files in changesets and manifests |
|
128 | 128 | checking files |
|
129 | 129 | 3 files, 5 changesets, 6 total revisions |
|
130 | 130 | 4:80fe151401c2 |
|
131 | 131 | searching for changes |
|
132 | 132 | 5 changesets found |
|
133 | 133 | adding changesets |
|
134 | 134 | adding manifests |
|
135 | 135 | adding file changes |
|
136 | 136 | added 5 changesets with 5 changes to 2 files |
|
137 | 137 | (run 'hg update' to get a working copy) |
|
138 | 138 | checking changesets |
|
139 | 139 | checking manifests |
|
140 | 140 | crosschecking files in changesets and manifests |
|
141 | 141 | checking files |
|
142 | 142 | 2 files, 5 changesets, 5 total revisions |
|
143 | 143 | 4:836ac62537ab |
|
144 | 144 | pulling from ../test-7 |
|
145 | 145 | searching for changes |
|
146 | 146 | adding changesets |
|
147 | 147 | adding manifests |
|
148 | 148 | adding file changes |
|
149 | 149 | added 4 changesets with 2 changes to 3 files (+1 heads) |
|
150 | 150 | (run 'hg heads' to see heads, 'hg merge' to merge) |
|
151 | 151 | checking changesets |
|
152 | 152 | checking manifests |
|
153 | 153 | crosschecking files in changesets and manifests |
|
154 | 154 | checking files |
|
155 | 155 | 4 files, 9 changesets, 7 total revisions |
|
156 | 156 | rolling back last transaction |
|
157 | 157 | % should fail |
|
158 | 158 | abort: --base is incompatible with specifiying a destination |
|
159 | 159 | abort: repository default-push not found! |
|
160 | 160 | 2 changesets found |
|
161 | 161 | 4 changesets found |
|
162 | 162 | 6 changesets found |
|
163 | 163 | 1 changesets found |
|
164 | 164 | 1 changesets found |
|
165 | 165 | 1 files updated, 0 files merged, 0 files removed, 0 files unresolved |
|
166 | 166 | % 2 |
|
167 | 167 | 2:d62976ca1e50 |
|
168 | 168 | adding changesets |
|
169 | 169 | transaction abort! |
|
170 | 170 | rollback completed |
|
171 | abort: unknown parent ac69c658229d! | |
|
171 | abort: 00changelog.i@ac69c658229d: unknown parent! | |
|
172 | 172 | % 2 |
|
173 | 173 | 2:d62976ca1e50 |
|
174 | 174 | adding changesets |
|
175 | 175 | adding manifests |
|
176 | 176 | adding file changes |
|
177 | 177 | added 6 changesets with 4 changes to 4 files (+1 heads) |
|
178 | 178 | (run 'hg heads' to see heads, 'hg merge' to merge) |
|
179 | 179 | % 8 |
|
180 | 180 | 8:836ac62537ab |
|
181 | 181 | checking changesets |
|
182 | 182 | checking manifests |
|
183 | 183 | crosschecking files in changesets and manifests |
|
184 | 184 | checking files |
|
185 | 185 | 4 files, 9 changesets, 7 total revisions |
|
186 | 186 | rolling back last transaction |
|
187 | 187 | % 2 |
|
188 | 188 | 2:d62976ca1e50 |
|
189 | 189 | adding changesets |
|
190 | 190 | adding manifests |
|
191 | 191 | adding file changes |
|
192 | 192 | added 2 changesets with 2 changes to 2 files |
|
193 | 193 | (run 'hg update' to get a working copy) |
|
194 | 194 | % 4 |
|
195 | 195 | 4:836ac62537ab |
|
196 | 196 | checking changesets |
|
197 | 197 | checking manifests |
|
198 | 198 | crosschecking files in changesets and manifests |
|
199 | 199 | checking files |
|
200 | 200 | 2 files, 5 changesets, 5 total revisions |
|
201 | 201 | rolling back last transaction |
|
202 | 202 | adding changesets |
|
203 | 203 | adding manifests |
|
204 | 204 | adding file changes |
|
205 | 205 | added 4 changesets with 3 changes to 3 files (+1 heads) |
|
206 | 206 | (run 'hg heads' to see heads, 'hg merge' to merge) |
|
207 | 207 | % 6 |
|
208 | 208 | 6:80fe151401c2 |
|
209 | 209 | checking changesets |
|
210 | 210 | checking manifests |
|
211 | 211 | crosschecking files in changesets and manifests |
|
212 | 212 | checking files |
|
213 | 213 | 3 files, 7 changesets, 6 total revisions |
|
214 | 214 | warning: detected divergent renames of afile to: |
|
215 | 215 | anotherfile |
|
216 | 216 | adifferentfile |
|
217 | 217 | 2 files updated, 0 files merged, 0 files removed, 0 files unresolved |
|
218 | 218 | (branch merge, don't forget to commit) |
|
219 | 219 | 7 changesets found |
|
220 | 220 | 1 files updated, 0 files merged, 0 files removed, 0 files unresolved |
|
221 | 221 | adding changesets |
|
222 | 222 | adding manifests |
|
223 | 223 | adding file changes |
|
224 | 224 | added 7 changesets with 4 changes to 4 files |
|
225 | 225 | (run 'hg update' to get a working copy) |
|
226 | 226 | % 9 |
|
227 | 227 | 9:607fe5912aad |
|
228 | 228 | checking changesets |
|
229 | 229 | checking manifests |
|
230 | 230 | crosschecking files in changesets and manifests |
|
231 | 231 | checking files |
|
232 | 232 | 4 files, 10 changesets, 7 total revisions |
@@ -1,22 +1,22 b'' | |||
|
1 | 1 | diff -r acd8075edac9 b |
|
2 | 2 | --- /dev/null |
|
3 | 3 | +++ b/b |
|
4 | 4 | @@ -0,0 +1,1 @@ |
|
5 | 5 | +123 |
|
6 | 6 | diff -r acd8075edac9 b |
|
7 | 7 | --- /dev/null |
|
8 | 8 | +++ b/b |
|
9 | 9 | @@ -0,0 +1,1 @@ |
|
10 | 10 | +123 |
|
11 | 11 | diff -r acd8075edac9 a |
|
12 | 12 | --- a/a |
|
13 | 13 | +++ b/a |
|
14 | 14 | @@ -0,0 +1,1 @@ |
|
15 | 15 | +foo |
|
16 | 16 | diff -r acd8075edac9 b |
|
17 | 17 | --- /dev/null |
|
18 | 18 | +++ b/b |
|
19 | 19 | @@ -0,0 +1,1 @@ |
|
20 | 20 | +123 |
|
21 | abort: |

22 | abort: |

21 | abort: 00changelog.i@: ambiguous identifier! | |
|
22 | abort: 00changelog.i@: ambiguous identifier! |
@@ -1,50 +1,50 b'' | |||
|
1 | 1 | adding bar |
|
2 | 2 | adding foo |
|
3 | 3 | 1 files updated, 0 files merged, 1 files removed, 0 files unresolved |
|
4 | 4 | % start imerge |
|
5 | 5 | 1 files updated, 0 files merged, 0 files removed, 0 files unresolved |
|
6 | 6 | (branch merge, don't forget to commit) |
|
7 | 7 | U foo |
|
8 | 8 | foo |
|
9 | 9 | bar |
|
10 | 10 | bar |
|
11 | 11 | bar |
|
12 | 12 | % status -v |
|
13 | 13 | merging e6da46716401 and 30d266f502e7 |
|
14 | 14 | U foo (foo2) |
|
15 | 15 | % next |
|
16 | 16 | foo |
|
17 | 17 | % merge next |
|
18 | 18 | merging foo and foo2 |
|
19 | 19 | all conflicts resolved |
|
20 | 20 | % unresolve |
|
21 | 21 | % merge foo |
|
22 | 22 | merging foo and foo2 |
|
23 | 23 | all conflicts resolved |
|
24 | 24 | % save |
|
25 | 25 | % load |
|
26 | 26 | 2 files updated, 0 files merged, 1 files removed, 0 files unresolved |
|
27 | 27 | 2 files updated, 0 files merged, 0 files removed, 0 files unresolved |
|
28 | 28 | 1 files updated, 0 files merged, 0 files removed, 0 files unresolved |
|
29 | 29 | (branch merge, don't forget to commit) |
|
30 | 30 | R foo |
|
31 | 31 | all conflicts resolved |
|
32 | 32 | foo |
|
33 | 33 | changeset: 3:fa9a6defdcaf |
|
34 | 34 | tag: tip |
|
35 | 35 | parent: 2:e6da46716401 |
|
36 | 36 | parent: 1:30d266f502e7 |
|
37 | 37 | user: test |
|
38 | 38 | date: Thu Jan 01 00:00:03 1970 +0000 |
|
39 | 39 | files: foo foo2 |
|
40 | 40 | description: |
|
41 | 41 | merged |
|
42 | 42 | |
|
43 | 43 | |
|
44 | 44 | % nothing to merge -- tip |
|
45 | 45 | abort: there is nothing to merge |
|
46 | 46 | 2 files updated, 0 files merged, 1 files removed, 0 files unresolved |
|
47 | 47 | % nothing to merge |
|
48 | 48 | abort: there is nothing to merge - use "hg update" instead |
|
49 | 49 | % load unknown parent |
|
50 | abort: merge parent e6da46716401 |

50 | abort: merge parent e6da46716401 not in repository |
@@ -1,223 +1,223 b'' | |||
|
1 | 1 | adding a |
|
2 | 2 | changeset: 0:8580ff50825a |
|
3 | 3 | user: test |
|
4 | 4 | date: Thu Jan 01 00:00:01 1970 +0000 |
|
5 | 5 | summary: a |
|
6 | 6 | |
|
7 | 7 | % -f, directory |
|
8 | 8 | abort: can only follow copies/renames for explicit file names |
|
9 | 9 | % -f, but no args |
|
10 | 10 | changeset: 4:b30c444c7c84 |
|
11 | 11 | tag: tip |
|
12 | 12 | user: test |
|
13 | 13 | date: Thu Jan 01 00:00:05 1970 +0000 |
|
14 | 14 | summary: e |
|
15 | 15 | |
|
16 | 16 | changeset: 3:16b60bf3f99a |
|
17 | 17 | user: test |
|
18 | 18 | date: Thu Jan 01 00:00:04 1970 +0000 |
|
19 | 19 | summary: d |
|
20 | 20 | |
|
21 | 21 | changeset: 2:21fba396af4c |
|
22 | 22 | user: test |
|
23 | 23 | date: Thu Jan 01 00:00:03 1970 +0000 |
|
24 | 24 | summary: c |
|
25 | 25 | |
|
26 | 26 | changeset: 1:c0296dabce9b |
|
27 | 27 | user: test |
|
28 | 28 | date: Thu Jan 01 00:00:02 1970 +0000 |
|
29 | 29 | summary: b |
|
30 | 30 | |
|
31 | 31 | changeset: 0:8580ff50825a |
|
32 | 32 | user: test |
|
33 | 33 | date: Thu Jan 01 00:00:01 1970 +0000 |
|
34 | 34 | summary: a |
|
35 | 35 | |
|
36 | 36 | % one rename |
|
37 | 37 | changeset: 0:8580ff50825a |
|
38 | 38 | user: test |
|
39 | 39 | date: Thu Jan 01 00:00:01 1970 +0000 |
|
40 | 40 | files: a |
|
41 | 41 | description: |
|
42 | 42 | a |
|
43 | 43 | |
|
44 | 44 | |
|
45 | 45 | % many renames |
|
46 | 46 | changeset: 4:b30c444c7c84 |
|
47 | 47 | tag: tip |
|
48 | 48 | user: test |
|
49 | 49 | date: Thu Jan 01 00:00:05 1970 +0000 |
|
50 | 50 | files: dir/b e |
|
51 | 51 | description: |
|
52 | 52 | e |
|
53 | 53 | |
|
54 | 54 | |
|
55 | 55 | changeset: 2:21fba396af4c |
|
56 | 56 | user: test |
|
57 | 57 | date: Thu Jan 01 00:00:03 1970 +0000 |
|
58 | 58 | files: b dir/b |
|
59 | 59 | description: |
|
60 | 60 | c |
|
61 | 61 | |
|
62 | 62 | |
|
63 | 63 | changeset: 1:c0296dabce9b |
|
64 | 64 | user: test |
|
65 | 65 | date: Thu Jan 01 00:00:02 1970 +0000 |
|
66 | 66 | files: b |
|
67 | 67 | description: |
|
68 | 68 | b |
|
69 | 69 | |
|
70 | 70 | |
|
71 | 71 | changeset: 0:8580ff50825a |
|
72 | 72 | user: test |
|
73 | 73 | date: Thu Jan 01 00:00:01 1970 +0000 |
|
74 | 74 | files: a |
|
75 | 75 | description: |
|
76 | 76 | a |
|
77 | 77 | |
|
78 | 78 | |
|
79 | 79 | % log copies |
|
80 | 80 | 4 e (dir/b) |
|
81 | 81 | 3 b (a) |
|
82 | 82 | 2 dir/b (b) |
|
83 | 83 | 1 b (a) |
|
84 | 84 | 0 |
|
85 | 85 | % log copies, non-linear manifest |
|
86 | 86 | 1 files updated, 0 files merged, 1 files removed, 0 files unresolved |
|
87 | 87 | adding foo |
|
88 | 88 | 5 e (dir/b) |
|
89 | 89 | % log copies, execute bit set |
|
90 | 90 | 6 |
|
91 | 91 | % log -p d |
|
92 | 92 | changeset: 3:16b60bf3f99a |
|
93 | 93 | user: test |
|
94 | 94 | date: Thu Jan 01 00:00:04 1970 +0000 |
|
95 | 95 | files: a b d |
|
96 | 96 | description: |
|
97 | 97 | d |
|
98 | 98 | |
|
99 | 99 | |
|
100 | 100 | diff -r 21fba396af4c -r 16b60bf3f99a d |
|
101 | 101 | --- /dev/null Thu Jan 01 00:00:00 1970 +0000 |
|
102 | 102 | +++ b/d Thu Jan 01 00:00:04 1970 +0000 |
|
103 | 103 | @@ -0,0 +1,1 @@ |
|
104 | 104 | +a |
|
105 | 105 | |
|
106 | 106 | adding base |
|
107 | 107 | 1 files updated, 0 files merged, 0 files removed, 0 files unresolved |
|
108 | 108 | adding b1 |
|
109 | 109 | % log -f |
|
110 | 110 | changeset: 3:e62f78d544b4 |
|
111 | 111 | tag: tip |
|
112 | 112 | parent: 1:3d5bf5654eda |
|
113 | 113 | user: test |
|
114 | 114 | date: Thu Jan 01 00:00:01 1970 +0000 |
|
115 | 115 | summary: b1 |
|
116 | 116 | |
|
117 | 117 | changeset: 1:3d5bf5654eda |
|
118 | 118 | user: test |
|
119 | 119 | date: Thu Jan 01 00:00:01 1970 +0000 |
|
120 | 120 | summary: r1 |
|
121 | 121 | |
|
122 | 122 | changeset: 0:67e992f2c4f3 |
|
123 | 123 | user: test |
|
124 | 124 | date: Thu Jan 01 00:00:01 1970 +0000 |
|
125 | 125 | summary: base |
|
126 | 126 | |
|
127 | 127 | 1 files updated, 0 files merged, 1 files removed, 0 files unresolved |
|
128 | 128 | adding b2 |
|
129 | 129 | % log -f -r 1:tip |
|
130 | 130 | changeset: 1:3d5bf5654eda |
|
131 | 131 | user: test |
|
132 | 132 | date: Thu Jan 01 00:00:01 1970 +0000 |
|
133 | 133 | summary: r1 |
|
134 | 134 | |
|
135 | 135 | changeset: 2:60c670bf5b30 |
|
136 | 136 | user: test |
|
137 | 137 | date: Thu Jan 01 00:00:01 1970 +0000 |
|
138 | 138 | summary: r2 |
|
139 | 139 | |
|
140 | 140 | changeset: 3:e62f78d544b4 |
|
141 | 141 | parent: 1:3d5bf5654eda |
|
142 | 142 | user: test |
|
143 | 143 | date: Thu Jan 01 00:00:01 1970 +0000 |
|
144 | 144 | summary: b1 |
|
145 | 145 | |
|
146 | 146 | 2 files updated, 0 files merged, 1 files removed, 0 files unresolved |
|
147 | 147 | 1 files updated, 0 files merged, 0 files removed, 0 files unresolved |
|
148 | 148 | (branch merge, don't forget to commit) |
|
149 | 149 | % log -r . with two parents |
|
150 | 150 | warning: working directory has two parents, tag '.' uses the first |
|
151 | 151 | changeset: 3:e62f78d544b4 |
|
152 | 152 | parent: 1:3d5bf5654eda |
|
153 | 153 | user: test |
|
154 | 154 | date: Thu Jan 01 00:00:01 1970 +0000 |
|
155 | 155 | summary: b1 |
|
156 | 156 | |
|
157 | 157 | % log -r . with one parent |
|
158 | 158 | changeset: 5:302e9dd6890d |
|
159 | 159 | tag: tip |
|
160 | 160 | parent: 3:e62f78d544b4 |
|
161 | 161 | parent: 4:ddb82e70d1a1 |
|
162 | 162 | user: test |
|
163 | 163 | date: Thu Jan 01 00:00:01 1970 +0000 |
|
164 | 164 | summary: m12 |
|
165 | 165 | |
|
166 | 166 | % log --follow-first |
|
167 | 167 | changeset: 6:2404bbcab562 |
|
168 | 168 | tag: tip |
|
169 | 169 | user: test |
|
170 | 170 | date: Thu Jan 01 00:00:01 1970 +0000 |
|
171 | 171 | summary: b1.1 |
|
172 | 172 | |
|
173 | 173 | changeset: 5:302e9dd6890d |
|
174 | 174 | parent: 3:e62f78d544b4 |
|
175 | 175 | parent: 4:ddb82e70d1a1 |
|
176 | 176 | user: test |
|
177 | 177 | date: Thu Jan 01 00:00:01 1970 +0000 |
|
178 | 178 | summary: m12 |
|
179 | 179 | |
|
180 | 180 | changeset: 3:e62f78d544b4 |
|
181 | 181 | parent: 1:3d5bf5654eda |
|
182 | 182 | user: test |
|
183 | 183 | date: Thu Jan 01 00:00:01 1970 +0000 |
|
184 | 184 | summary: b1 |
|
185 | 185 | |
|
186 | 186 | changeset: 1:3d5bf5654eda |
|
187 | 187 | user: test |
|
188 | 188 | date: Thu Jan 01 00:00:01 1970 +0000 |
|
189 | 189 | summary: r1 |
|
190 | 190 | |
|
191 | 191 | changeset: 0:67e992f2c4f3 |
|
192 | 192 | user: test |
|
193 | 193 | date: Thu Jan 01 00:00:01 1970 +0000 |
|
194 | 194 | summary: base |
|
195 | 195 | |
|
196 | 196 | % log -P 2 |
|
197 | 197 | changeset: 6:2404bbcab562 |
|
198 | 198 | tag: tip |
|
199 | 199 | user: test |
|
200 | 200 | date: Thu Jan 01 00:00:01 1970 +0000 |
|
201 | 201 | summary: b1.1 |
|
202 | 202 | |
|
203 | 203 | changeset: 5:302e9dd6890d |
|
204 | 204 | parent: 3:e62f78d544b4 |
|
205 | 205 | parent: 4:ddb82e70d1a1 |
|
206 | 206 | user: test |
|
207 | 207 | date: Thu Jan 01 00:00:01 1970 +0000 |
|
208 | 208 | summary: m12 |
|
209 | 209 | |
|
210 | 210 | changeset: 4:ddb82e70d1a1 |
|
211 | 211 | parent: 0:67e992f2c4f3 |
|
212 | 212 | user: test |
|
213 | 213 | date: Thu Jan 01 00:00:01 1970 +0000 |
|
214 | 214 | summary: b2 |
|
215 | 215 | |
|
216 | 216 | changeset: 3:e62f78d544b4 |
|
217 | 217 | parent: 1:3d5bf5654eda |
|
218 | 218 | user: test |
|
219 | 219 | date: Thu Jan 01 00:00:01 1970 +0000 |
|
220 | 220 | summary: b1 |
|
221 | 221 | |
|
222 | 222 | % log -r "" |
|
223 | abort: |

223 | abort: 00changelog.i@: ambiguous identifier! |