revlog: report node and file when lookup fails
Matt Mackall
r6228:c0c4c7b1 default
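The change is the same in all three files below: revlog.LookupError is now raised with three arguments (the node being looked up, the file or index the lookup ran against, and a short message), so failures can report both the node and the file instead of a bare name. A minimal sketch of an exception class with that shape; the attribute names and the 20-byte-node check follow the call sites in the hunks below, but the body is an assumption, not necessarily the code this commit adds to revlog.py:

import binascii

class RevlogError(Exception):
    pass

class LookupError(RevlogError, KeyError):
    """Lookup failure carrying the node and the file involved (sketch)."""
    def __init__(self, name, index, message):
        self.name = name      # node (or id) that could not be found
        self.index = index    # revlog/index file the lookup ran against
        if isinstance(name, str) and len(name) == 20:
            # abbreviate a binary node to the familiar 12-hex-digit form
            name = binascii.hexlify(name)[:12]
        RevlogError.__init__(self, '%s@%s: %s' % (index, name, message))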
@@ -1,406 +1,407 @@
 # Copyright (C) 2007 Brendan Cully <brendan@kublai.com>
 # Published under the GNU GPL

 '''
 imerge - interactive merge
 '''

 from mercurial.i18n import _
 from mercurial.node import hex, short
 from mercurial import commands, cmdutil, dispatch, fancyopts
 from mercurial import hg, filemerge, util, revlog
 import os, tarfile

 class InvalidStateFileException(Exception): pass

 class ImergeStateFile(object):
     def __init__(self, im):
         self.im = im

     def save(self, dest):
         tf = tarfile.open(dest, 'w:gz')

         st = os.path.join(self.im.path, 'status')
         tf.add(st, os.path.join('.hg', 'imerge', 'status'))

         for f in self.im.resolved:
             (fd, fo) = self.im.conflicts[f]
             abssrc = self.im.repo.wjoin(fd)
             tf.add(abssrc, fd)

         tf.close()

     def load(self, source):
         wlock = self.im.repo.wlock()
         lock = self.im.repo.lock()

         tf = tarfile.open(source, 'r')
         contents = tf.getnames()
         # tarfile normalizes path separators to '/'
         statusfile = '.hg/imerge/status'
         if statusfile not in contents:
             raise InvalidStateFileException('no status file')

         tf.extract(statusfile, self.im.repo.root)
         p1, p2 = self.im.load()
         if self.im.repo.dirstate.parents()[0] != p1.node():
             hg.clean(self.im.repo, p1.node())
         self.im.start(p2.node())
         for tarinfo in tf:
             tf.extract(tarinfo, self.im.repo.root)
         self.im.load()

 class Imerge(object):
     def __init__(self, ui, repo):
         self.ui = ui
         self.repo = repo

         self.path = repo.join('imerge')
         self.opener = util.opener(self.path)

         self.wctx = self.repo.workingctx()
         self.conflicts = {}
         self.resolved = []

     def merging(self):
         return len(self.wctx.parents()) > 1

     def load(self):
         # status format. \0-delimited file, fields are
         # p1, p2, conflict count, conflict filenames, resolved filenames
         # conflict filenames are tuples of localname, remoteorig, remotenew

         statusfile = self.opener('status')

         status = statusfile.read().split('\0')
         if len(status) < 3:
             raise util.Abort('invalid imerge status file')

         try:
             parents = [self.repo.changectx(n) for n in status[:2]]
         except revlog.LookupError, e:
-            raise util.Abort('merge parent %s not in repository' % e.name)
+            raise util.Abort(_('merge parent %s not in repository') %
+                             short(e.name))

         status = status[2:]
         conflicts = int(status.pop(0)) * 3
         self.resolved = status[conflicts:]
         for i in xrange(0, conflicts, 3):
             self.conflicts[status[i]] = (status[i+1], status[i+2])

         return parents

     def save(self):
         lock = self.repo.lock()

         if not os.path.isdir(self.path):
             os.mkdir(self.path)
         statusfile = self.opener('status', 'wb')

         out = [hex(n.node()) for n in self.wctx.parents()]
         out.append(str(len(self.conflicts)))
         conflicts = self.conflicts.items()
         conflicts.sort()
         for fw, fd_fo in conflicts:
             out.append(fw)
             out.extend(fd_fo)
         out.extend(self.resolved)

         statusfile.write('\0'.join(out))

     def remaining(self):
         return [f for f in self.conflicts if f not in self.resolved]

     def filemerge(self, fn, interactive=True):
         wlock = self.repo.wlock()

         (fd, fo) = self.conflicts[fn]
         p1, p2 = self.wctx.parents()

         # this could be greatly improved
         realmerge = os.environ.get('HGMERGE')
         if not interactive:
             os.environ['HGMERGE'] = 'merge'

         # The filemerge ancestor algorithm does not work if self.wctx
         # already has two parents (in normal merge it doesn't yet). But
         # this is very dirty.
         self.wctx._parents.pop()
         try:
             # TODO: we should probably revert the file if merge fails
             return filemerge.filemerge(self.repo, fn, fd, fo, self.wctx, p2)
         finally:
             self.wctx._parents.append(p2)
             if realmerge:
                 os.environ['HGMERGE'] = realmerge
             elif not interactive:
                 del os.environ['HGMERGE']

     def start(self, rev=None):
         _filemerge = filemerge.filemerge
         def filemerge_(repo, fw, fd, fo, wctx, mctx):
             self.conflicts[fw] = (fd, fo)

         filemerge.filemerge = filemerge_
         commands.merge(self.ui, self.repo, rev=rev)
         filemerge.filemerge = _filemerge

         self.wctx = self.repo.workingctx()
         self.save()

     def resume(self):
         self.load()

         dp = self.repo.dirstate.parents()
         p1, p2 = self.wctx.parents()
         if p1.node() != dp[0] or p2.node() != dp[1]:
             raise util.Abort('imerge state does not match working directory')

     def next(self):
         remaining = self.remaining()
         return remaining and remaining[0]

     def resolve(self, files):
         resolved = dict.fromkeys(self.resolved)
         for fn in files:
             if fn not in self.conflicts:
                 raise util.Abort('%s is not in the merge set' % fn)
             resolved[fn] = True
         self.resolved = resolved.keys()
         self.resolved.sort()
         self.save()
         return 0

     def unresolve(self, files):
         resolved = dict.fromkeys(self.resolved)
         for fn in files:
             if fn not in resolved:
                 raise util.Abort('%s is not resolved' % fn)
             del resolved[fn]
         self.resolved = resolved.keys()
         self.resolved.sort()
         self.save()
         return 0

     def pickle(self, dest):
         '''write current merge state to file to be resumed elsewhere'''
         state = ImergeStateFile(self)
         return state.save(dest)

     def unpickle(self, source):
         '''read merge state from file'''
         state = ImergeStateFile(self)
         return state.load(source)

 def load(im, source):
     if im.merging():
         raise util.Abort('there is already a merge in progress '
                          '(update -C <rev> to abort it)')
     m, a, r, d = im.repo.status()[:4]
     if m or a or r or d:
         raise util.Abort('working directory has uncommitted changes')

     rc = im.unpickle(source)
     if not rc:
         status(im)
     return rc

 def merge_(im, filename=None, auto=False):
     success = True
     if auto and not filename:
         for fn in im.remaining():
             rc = im.filemerge(fn, interactive=False)
             if rc:
                 success = False
             else:
                 im.resolve([fn])
         if success:
             im.ui.write('all conflicts resolved\n')
         else:
             status(im)
         return 0

     if not filename:
         filename = im.next()
         if not filename:
             im.ui.write('all conflicts resolved\n')
             return 0

     rc = im.filemerge(filename, interactive=not auto)
     if not rc:
         im.resolve([filename])
         if not im.next():
             im.ui.write('all conflicts resolved\n')
     return rc

 def next(im):
     n = im.next()
     if n:
         im.ui.write('%s\n' % n)
     else:
         im.ui.write('all conflicts resolved\n')
     return 0

 def resolve(im, *files):
     if not files:
         raise util.Abort('resolve requires at least one filename')
     return im.resolve(files)

 def save(im, dest):
     return im.pickle(dest)

 def status(im, **opts):
     if not opts.get('resolved') and not opts.get('unresolved'):
         opts['resolved'] = True
         opts['unresolved'] = True

     if im.ui.verbose:
         p1, p2 = [short(p.node()) for p in im.wctx.parents()]
         im.ui.note(_('merging %s and %s\n') % (p1, p2))

     conflicts = im.conflicts.keys()
     conflicts.sort()
     remaining = dict.fromkeys(im.remaining())
     st = []
     for fn in conflicts:
         if opts.get('no_status'):
             mode = ''
         elif fn in remaining:
             mode = 'U '
         else:
             mode = 'R '
         if ((opts.get('resolved') and fn not in remaining)
             or (opts.get('unresolved') and fn in remaining)):
             st.append((mode, fn))
     st.sort()
     for (mode, fn) in st:
         if im.ui.verbose:
             fo, fd = im.conflicts[fn]
             if fd != fn:
                 fn = '%s (%s)' % (fn, fd)
         im.ui.write('%s%s\n' % (mode, fn))
     if opts.get('unresolved') and not remaining:
         im.ui.write(_('all conflicts resolved\n'))

     return 0

 def unresolve(im, *files):
     if not files:
         raise util.Abort('unresolve requires at least one filename')
     return im.unresolve(files)

 subcmdtable = {
     'load': (load, []),
     'merge':
         (merge_,
          [('a', 'auto', None, _('automatically resolve if possible'))]),
     'next': (next, []),
     'resolve': (resolve, []),
     'save': (save, []),
     'status':
         (status,
          [('n', 'no-status', None, _('hide status prefix')),
           ('', 'resolved', None, _('only show resolved conflicts')),
           ('', 'unresolved', None, _('only show unresolved conflicts'))]),
     'unresolve': (unresolve, [])
     }

 def dispatch_(im, args, opts):
     def complete(s, choices):
         candidates = []
         for choice in choices:
             if choice.startswith(s):
                 candidates.append(choice)
         return candidates

     c, args = args[0], list(args[1:])
     cmd = complete(c, subcmdtable.keys())
     if not cmd:
         raise cmdutil.UnknownCommand('imerge ' + c)
     if len(cmd) > 1:
         cmd.sort()
         raise cmdutil.AmbiguousCommand('imerge ' + c, cmd)
     cmd = cmd[0]

     func, optlist = subcmdtable[cmd]
     opts = {}
     try:
         args = fancyopts.fancyopts(args, optlist, opts)
         return func(im, *args, **opts)
     except fancyopts.getopt.GetoptError, inst:
         raise dispatch.ParseError('imerge', '%s: %s' % (cmd, inst))
     except TypeError:
         raise dispatch.ParseError('imerge', _('%s: invalid arguments') % cmd)

 def imerge(ui, repo, *args, **opts):
     '''interactive merge

     imerge lets you split a merge into pieces. When you start a merge
     with imerge, the names of all files with conflicts are recorded.
     You can then merge any of these files, and if the merge is
     successful, they will be marked as resolved. When all files are
     resolved, the merge is complete.

     If no merge is in progress, hg imerge [rev] will merge the working
     directory with rev (defaulting to the other head if the repository
     only has two heads). You may also resume a saved merge with
     hg imerge load <file>.

     If a merge is in progress, hg imerge will default to merging the
     next unresolved file.

     The following subcommands are available:

     status:
       show the current state of the merge
       options:
         -n --no-status: do not print the status prefix
         --resolved: only print resolved conflicts
         --unresolved: only print unresolved conflicts
     next:
       show the next unresolved file merge
     merge [<file>]:
       merge <file>. If the file merge is successful, the file will be
       recorded as resolved. If no file is given, the next unresolved
       file will be merged.
     resolve <file>...:
       mark files as successfully merged
     unresolve <file>...:
       mark files as requiring merging.
     save <file>:
       save the state of the merge to a file to be resumed elsewhere
     load <file>:
       load the state of the merge from a file created by save
     '''

     im = Imerge(ui, repo)

     if im.merging():
         im.resume()
     else:
         rev = opts.get('rev')
         if rev and args:
             raise util.Abort('please specify just one revision')

         if len(args) == 2 and args[0] == 'load':
             pass
         else:
             if args:
                 rev = args[0]
             im.start(rev=rev)
             if opts.get('auto'):
                 args = ['merge', '--auto']
             else:
                 args = ['status']

     if not args:
         args = ['merge']

     return dispatch_(im, args, opts)

 cmdtable = {
     '^imerge':
     (imerge,
      [('r', 'rev', '', _('revision to merge')),
       ('a', 'auto', None, _('automatically merge where possible'))],
      'hg imerge [command]')
     }
@@ -1,282 +1,283 @@
 """
 bundlerepo.py - repository class for viewing uncompressed bundles

 This provides a read-only repository interface to bundles as if
 they were part of the actual repository.

 Copyright 2006, 2007 Benoit Boissinot <bboissin@gmail.com>

 This software may be used and distributed according to the terms
 of the GNU General Public License, incorporated herein by reference.
 """

 from node import hex, nullid, short
 from i18n import _
 import changegroup, util, os, struct, bz2, tempfile, mdiff
 import localrepo, changelog, manifest, filelog, revlog

 class bundlerevlog(revlog.revlog):
     def __init__(self, opener, indexfile, bundlefile,
                  linkmapper=None):
         # How it works:
         # to retrieve a revision, we need to know the offset of
         # the revision in the bundlefile (an opened file).
         #
         # We store this offset in the index (start), to differentiate a
         # rev in the bundle and from a rev in the revlog, we check
         # len(index[r]). If the tuple is bigger than 7, it is a bundle
         # (it is bigger since we store the node to which the delta is)
         #
         revlog.revlog.__init__(self, opener, indexfile)
         self.bundlefile = bundlefile
         self.basemap = {}
         def chunkpositer():
             for chunk in changegroup.chunkiter(bundlefile):
                 pos = bundlefile.tell()
                 yield chunk, pos - len(chunk)
         n = self.count()
         prev = None
         for chunk, start in chunkpositer():
             size = len(chunk)
             if size < 80:
                 raise util.Abort("invalid changegroup")
             start += 80
             size -= 80
             node, p1, p2, cs = struct.unpack("20s20s20s20s", chunk[:80])
             if node in self.nodemap:
                 prev = node
                 continue
             for p in (p1, p2):
                 if not p in self.nodemap:
-                    raise revlog.LookupError(hex(p1), _("unknown parent %s") % short(p1))
+                    raise revlog.LookupError(p1, self.indexfile,
+                                             _("unknown parent"))
             if linkmapper is None:
                 link = n
             else:
                 link = linkmapper(cs)

             if not prev:
                 prev = p1
             # start, size, full unc. size, base (unused), link, p1, p2, node
             e = (revlog.offset_type(start, 0), size, -1, -1, link,
                  self.rev(p1), self.rev(p2), node)
             self.basemap[n] = prev
             self.index.insert(-1, e)
             self.nodemap[node] = n
             prev = node
             n += 1

     def bundle(self, rev):
         """is rev from the bundle"""
         if rev < 0:
             return False
         return rev in self.basemap
     def bundlebase(self, rev): return self.basemap[rev]
     def chunk(self, rev, df=None, cachelen=4096):
         # Warning: in case of bundle, the diff is against bundlebase,
         # not against rev - 1
         # XXX: could use some caching
         if not self.bundle(rev):
             return revlog.revlog.chunk(self, rev, df)
         self.bundlefile.seek(self.start(rev))
         return self.bundlefile.read(self.length(rev))

     def revdiff(self, rev1, rev2):
         """return or calculate a delta between two revisions"""
         if self.bundle(rev1) and self.bundle(rev2):
             # hot path for bundle
             revb = self.rev(self.bundlebase(rev2))
             if revb == rev1:
                 return self.chunk(rev2)
         elif not self.bundle(rev1) and not self.bundle(rev2):
             return revlog.revlog.revdiff(self, rev1, rev2)

         return mdiff.textdiff(self.revision(self.node(rev1)),
                               self.revision(self.node(rev2)))

     def revision(self, node):
         """return an uncompressed revision of a given"""
         if node == nullid: return ""

         text = None
         chain = []
         iter_node = node
         rev = self.rev(iter_node)
         # reconstruct the revision if it is from a changegroup
         while self.bundle(rev):
             if self._cache and self._cache[0] == iter_node:
                 text = self._cache[2]
                 break
             chain.append(rev)
             iter_node = self.bundlebase(rev)
             rev = self.rev(iter_node)
         if text is None:
             text = revlog.revlog.revision(self, iter_node)

         while chain:
             delta = self.chunk(chain.pop())
             text = mdiff.patches(text, [delta])

         p1, p2 = self.parents(node)
         if node != revlog.hash(text, p1, p2):
             raise revlog.RevlogError(_("integrity check failed on %s:%d")
                                      % (self.datafile, self.rev(node)))

         self._cache = (node, self.rev(node), text)
         return text

     def addrevision(self, text, transaction, link, p1=None, p2=None, d=None):
         raise NotImplementedError
     def addgroup(self, revs, linkmapper, transaction, unique=0):
         raise NotImplementedError
     def strip(self, rev, minlink):
         raise NotImplementedError
     def checksize(self):
         raise NotImplementedError

 class bundlechangelog(bundlerevlog, changelog.changelog):
     def __init__(self, opener, bundlefile):
         changelog.changelog.__init__(self, opener)
         bundlerevlog.__init__(self, opener, self.indexfile, bundlefile)

 class bundlemanifest(bundlerevlog, manifest.manifest):
     def __init__(self, opener, bundlefile, linkmapper):
         manifest.manifest.__init__(self, opener)
         bundlerevlog.__init__(self, opener, self.indexfile, bundlefile,
                               linkmapper)

 class bundlefilelog(bundlerevlog, filelog.filelog):
     def __init__(self, opener, path, bundlefile, linkmapper):
         filelog.filelog.__init__(self, opener, path)
         bundlerevlog.__init__(self, opener, self.indexfile, bundlefile,
                               linkmapper)

 class bundlerepository(localrepo.localrepository):
     def __init__(self, ui, path, bundlename):
         localrepo.localrepository.__init__(self, ui, path)

         if path:
             self._url = 'bundle:' + path + '+' + bundlename
         else:
             self._url = 'bundle:' + bundlename

         self.tempfile = None
         self.bundlefile = open(bundlename, "rb")
         header = self.bundlefile.read(6)
         if not header.startswith("HG"):
             raise util.Abort(_("%s: not a Mercurial bundle file") % bundlename)
         elif not header.startswith("HG10"):
             raise util.Abort(_("%s: unknown bundle version") % bundlename)
         elif header == "HG10BZ":
             fdtemp, temp = tempfile.mkstemp(prefix="hg-bundle-",
                                             suffix=".hg10un", dir=self.path)
             self.tempfile = temp
             fptemp = os.fdopen(fdtemp, 'wb')
             def generator(f):
                 zd = bz2.BZ2Decompressor()
                 zd.decompress("BZ")
                 for chunk in f:
                     yield zd.decompress(chunk)
             gen = generator(util.filechunkiter(self.bundlefile, 4096))

             try:
                 fptemp.write("HG10UN")
                 for chunk in gen:
                     fptemp.write(chunk)
             finally:
                 fptemp.close()
                 self.bundlefile.close()

             self.bundlefile = open(self.tempfile, "rb")
             # seek right after the header
             self.bundlefile.seek(6)
         elif header == "HG10UN":
             # nothing to do
             pass
         else:
             raise util.Abort(_("%s: unknown bundle compression type")
                              % bundlename)
         # dict with the mapping 'filename' -> position in the bundle
         self.bundlefilespos = {}

     def __getattr__(self, name):
         if name == 'changelog':
             self.changelog = bundlechangelog(self.sopener, self.bundlefile)
             self.manstart = self.bundlefile.tell()
             return self.changelog
         if name == 'manifest':
             self.bundlefile.seek(self.manstart)
             self.manifest = bundlemanifest(self.sopener, self.bundlefile,
                                            self.changelog.rev)
             self.filestart = self.bundlefile.tell()
             return self.manifest
         if name == 'manstart':
             self.changelog
             return self.manstart
         if name == 'filestart':
             self.manifest
             return self.filestart
         return localrepo.localrepository.__getattr__(self, name)

     def url(self):
         return self._url

     def dev(self):
         return -1

     def file(self, f):
         if not self.bundlefilespos:
             self.bundlefile.seek(self.filestart)
             while 1:
                 chunk = changegroup.getchunk(self.bundlefile)
                 if not chunk:
                     break
                 self.bundlefilespos[chunk] = self.bundlefile.tell()
                 for c in changegroup.chunkiter(self.bundlefile):
                     pass

         if f[0] == '/':
             f = f[1:]
         if f in self.bundlefilespos:
             self.bundlefile.seek(self.bundlefilespos[f])
             return bundlefilelog(self.sopener, f, self.bundlefile,
                                  self.changelog.rev)
         else:
             return filelog.filelog(self.sopener, f)

     def close(self):
         """Close assigned bundle file immediately."""
         self.bundlefile.close()

     def __del__(self):
         bundlefile = getattr(self, 'bundlefile', None)
         if bundlefile and not bundlefile.closed:
             bundlefile.close()
         tempfile = getattr(self, 'tempfile', None)
         if tempfile is not None:
             os.unlink(tempfile)

 def instance(ui, path, create):
     if create:
         raise util.Abort(_('cannot create new bundle repository'))
     parentpath = ui.config("bundle", "mainreporoot", "")
     if parentpath:
         # Try to make the full path relative so we get a nice, short URL.
         # In particular, we don't want temp dir names in test outputs.
         cwd = os.getcwd()
         if parentpath == cwd:
             parentpath = ''
         else:
             cwd = os.path.join(cwd,'')
             if parentpath.startswith(cwd):
                 parentpath = parentpath[len(cwd):]
     path = util.drop_scheme('file', path)
     if path.startswith('bundle:'):
         path = util.drop_scheme('bundle', path)
         s = path.split("+", 1)
         if len(s) == 1:
             repopath, bundlename = parentpath, s[0]
         else:
             repopath, bundlename = s
     else:
         repopath, bundlename = parentpath, path
     return bundlerepository(ui, repopath, bundlename)
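The "How it works" comment in bundlerevlog above is the heart of that file: each changegroup chunk is remembered by its byte offset inside the bundle, so a delta can be re-read later with a plain seek instead of re-parsing the stream. A self-contained toy of that bookkeeping, using a simplified length-prefixed framing (a 4-byte length that includes itself, with zero terminating the group); this approximates, but is not, the real changegroup format:

import io, struct

def chunkpositer(f):
    # yield (payload, offset-of-payload); the offset is what
    # bundlerevlog stores in its index so chunk() can seek back
    while True:
        hdr = f.read(4)
        if len(hdr) < 4:
            break
        size = struct.unpack(">l", hdr)[0]
        if size <= 4:
            break  # zero-length chunk ends the group
        payload = f.read(size - 4)
        yield payload, f.tell() - len(payload)

# build a two-chunk "bundle", index it, then re-read one chunk on demand
buf = io.BytesIO()
for data in (b"delta-one", b"delta-two"):
    buf.write(struct.pack(">l", len(data) + 4) + data)
buf.write(struct.pack(">l", 0))
buf.seek(0)
positions = dict(chunkpositer(buf))
buf.seek(positions[b"delta-two"])
assert buf.read(9) == b"delta-two"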
@@ -1,620 +1,622 b''
1 # context.py - changeset and file context objects for mercurial
1 # context.py - changeset and file context objects for mercurial
2 #
2 #
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms
5 # This software may be used and distributed according to the terms
6 # of the GNU General Public License, incorporated herein by reference.
6 # of the GNU General Public License, incorporated herein by reference.
7
7
8 from node import nullid, nullrev, short
8 from node import nullid, nullrev, short
9 from i18n import _
9 from i18n import _
10 import ancestor, bdiff, revlog, util, os, errno
10 import ancestor, bdiff, revlog, util, os, errno
11
11
12 class changectx(object):
12 class changectx(object):
13 """A changecontext object makes access to data related to a particular
13 """A changecontext object makes access to data related to a particular
14 changeset convenient."""
14 changeset convenient."""
15 def __init__(self, repo, changeid=None):
15 def __init__(self, repo, changeid=None):
16 """changeid is a revision number, node, or tag"""
16 """changeid is a revision number, node, or tag"""
17 self._repo = repo
17 self._repo = repo
18
18
19 if not changeid and changeid != 0:
19 if not changeid and changeid != 0:
20 p1, p2 = self._repo.dirstate.parents()
20 p1, p2 = self._repo.dirstate.parents()
21 self._rev = self._repo.changelog.rev(p1)
21 self._rev = self._repo.changelog.rev(p1)
22 if self._rev == -1:
22 if self._rev == -1:
23 changeid = 'tip'
23 changeid = 'tip'
24 else:
24 else:
25 self._node = p1
25 self._node = p1
26 return
26 return
27
27
28 self._node = self._repo.lookup(changeid)
28 self._node = self._repo.lookup(changeid)
29 self._rev = self._repo.changelog.rev(self._node)
29 self._rev = self._repo.changelog.rev(self._node)
30
30
31 def __str__(self):
31 def __str__(self):
32 return short(self.node())
32 return short(self.node())
33
33
34 def __repr__(self):
34 def __repr__(self):
35 return "<changectx %s>" % str(self)
35 return "<changectx %s>" % str(self)
36
36
37 def __eq__(self, other):
37 def __eq__(self, other):
38 try:
38 try:
39 return self._rev == other._rev
39 return self._rev == other._rev
40 except AttributeError:
40 except AttributeError:
41 return False
41 return False
42
42
43 def __ne__(self, other):
43 def __ne__(self, other):
44 return not (self == other)
44 return not (self == other)
45
45
46 def __nonzero__(self):
46 def __nonzero__(self):
47 return self._rev != nullrev
47 return self._rev != nullrev
48
48
49 def __getattr__(self, name):
49 def __getattr__(self, name):
50 if name == '_changeset':
50 if name == '_changeset':
51 self._changeset = self._repo.changelog.read(self.node())
51 self._changeset = self._repo.changelog.read(self.node())
52 return self._changeset
52 return self._changeset
53 elif name == '_manifest':
53 elif name == '_manifest':
54 self._manifest = self._repo.manifest.read(self._changeset[0])
54 self._manifest = self._repo.manifest.read(self._changeset[0])
55 return self._manifest
55 return self._manifest
56 elif name == '_manifestdelta':
56 elif name == '_manifestdelta':
57 md = self._repo.manifest.readdelta(self._changeset[0])
57 md = self._repo.manifest.readdelta(self._changeset[0])
58 self._manifestdelta = md
58 self._manifestdelta = md
59 return self._manifestdelta
59 return self._manifestdelta
60 else:
60 else:
61 raise AttributeError, name
61 raise AttributeError, name
62
62
63 def __contains__(self, key):
63 def __contains__(self, key):
64 return key in self._manifest
64 return key in self._manifest
65
65
66 def __getitem__(self, key):
66 def __getitem__(self, key):
67 return self.filectx(key)
67 return self.filectx(key)
68
68
69 def __iter__(self):
69 def __iter__(self):
70 a = self._manifest.keys()
70 a = self._manifest.keys()
71 a.sort()
71 a.sort()
72 for f in a:
72 for f in a:
73 yield f
73 yield f
74
74
75 def changeset(self): return self._changeset
75 def changeset(self): return self._changeset
76 def manifest(self): return self._manifest
76 def manifest(self): return self._manifest
77
77
78 def rev(self): return self._rev
78 def rev(self): return self._rev
79 def node(self): return self._node
79 def node(self): return self._node
80 def user(self): return self._changeset[1]
80 def user(self): return self._changeset[1]
81 def date(self): return self._changeset[2]
81 def date(self): return self._changeset[2]
82 def files(self): return self._changeset[3]
82 def files(self): return self._changeset[3]
83 def description(self): return self._changeset[4]
83 def description(self): return self._changeset[4]
84 def branch(self): return self._changeset[5].get("branch")
84 def branch(self): return self._changeset[5].get("branch")
85 def extra(self): return self._changeset[5]
85 def extra(self): return self._changeset[5]
86 def tags(self): return self._repo.nodetags(self._node)
86 def tags(self): return self._repo.nodetags(self._node)
87
87
88 def parents(self):
88 def parents(self):
89 """return contexts for each parent changeset"""
89 """return contexts for each parent changeset"""
90 p = self._repo.changelog.parents(self._node)
90 p = self._repo.changelog.parents(self._node)
91 return [changectx(self._repo, x) for x in p]
91 return [changectx(self._repo, x) for x in p]
92
92
93 def children(self):
93 def children(self):
94 """return contexts for each child changeset"""
94 """return contexts for each child changeset"""
95 c = self._repo.changelog.children(self._node)
95 c = self._repo.changelog.children(self._node)
96 return [changectx(self._repo, x) for x in c]
96 return [changectx(self._repo, x) for x in c]
97
97
98 def _fileinfo(self, path):
98 def _fileinfo(self, path):
99 if '_manifest' in self.__dict__:
99 if '_manifest' in self.__dict__:
100 try:
100 try:
101 return self._manifest[path], self._manifest.flags(path)
101 return self._manifest[path], self._manifest.flags(path)
102 except KeyError:
102 except KeyError:
103 raise revlog.LookupError(path, _("'%s' not found in manifest") % path)
103 raise revlog.LookupError(self._node, path,
104 _('not found in manifest'))
104 if '_manifestdelta' in self.__dict__ or path in self.files():
105 if '_manifestdelta' in self.__dict__ or path in self.files():
105 if path in self._manifestdelta:
106 if path in self._manifestdelta:
106 return self._manifestdelta[path], self._manifestdelta.flags(path)
107 return self._manifestdelta[path], self._manifestdelta.flags(path)
107 node, flag = self._repo.manifest.find(self._changeset[0], path)
108 node, flag = self._repo.manifest.find(self._changeset[0], path)
108 if not node:
109 if not node:
109 raise revlog.LookupError(path, _("'%s' not found in manifest") % path)
110 raise revlog.LookupError(self._node, path,
111 _('not found in manifest'))
110
112
111 return node, flag
113 return node, flag
112
114
113 def filenode(self, path):
115 def filenode(self, path):
114 return self._fileinfo(path)[0]
116 return self._fileinfo(path)[0]
115
117
116 def fileflags(self, path):
118 def fileflags(self, path):
117 try:
119 try:
118 return self._fileinfo(path)[1]
120 return self._fileinfo(path)[1]
119 except revlog.LookupError:
121 except revlog.LookupError:
120 return ''
122 return ''
121
123
122 def filectx(self, path, fileid=None, filelog=None):
124 def filectx(self, path, fileid=None, filelog=None):
123 """get a file context from this changeset"""
125 """get a file context from this changeset"""
124 if fileid is None:
126 if fileid is None:
125 fileid = self.filenode(path)
127 fileid = self.filenode(path)
126 return filectx(self._repo, path, fileid=fileid,
128 return filectx(self._repo, path, fileid=fileid,
127 changectx=self, filelog=filelog)
129 changectx=self, filelog=filelog)
128
130
129 def filectxs(self):
131 def filectxs(self):
130 """generate a file context for each file in this changeset's
132 """generate a file context for each file in this changeset's
131 manifest"""
133 manifest"""
132 mf = self.manifest()
134 mf = self.manifest()
133 m = mf.keys()
135 m = mf.keys()
134 m.sort()
136 m.sort()
135 for f in m:
137 for f in m:
136 yield self.filectx(f, fileid=mf[f])
138 yield self.filectx(f, fileid=mf[f])
137
139
138 def ancestor(self, c2):
140 def ancestor(self, c2):
139 """
141 """
140 return the ancestor context of self and c2
142 return the ancestor context of self and c2
141 """
143 """
142 n = self._repo.changelog.ancestor(self._node, c2._node)
144 n = self._repo.changelog.ancestor(self._node, c2._node)
143 return changectx(self._repo, n)
145 return changectx(self._repo, n)
144
146
145 class filectx(object):
147 class filectx(object):
146 """A filecontext object makes access to data related to a particular
148 """A filecontext object makes access to data related to a particular
147 filerevision convenient."""
149 filerevision convenient."""
148 def __init__(self, repo, path, changeid=None, fileid=None,
150 def __init__(self, repo, path, changeid=None, fileid=None,
149 filelog=None, changectx=None):
151 filelog=None, changectx=None):
150 """changeid can be a changeset revision, node, or tag.
152 """changeid can be a changeset revision, node, or tag.
151 fileid can be a file revision or node."""
153 fileid can be a file revision or node."""
152 self._repo = repo
154 self._repo = repo
153 self._path = path
155 self._path = path
154
156
155 assert (changeid is not None
157 assert (changeid is not None
156 or fileid is not None
158 or fileid is not None
157 or changectx is not None)
159 or changectx is not None)
158
160
159 if filelog:
161 if filelog:
160 self._filelog = filelog
162 self._filelog = filelog
161
163
162 if changeid is not None:
164 if changeid is not None:
163 self._changeid = changeid
165 self._changeid = changeid
164 if changectx is not None:
166 if changectx is not None:
165 self._changectx = changectx
167 self._changectx = changectx
166 if fileid is not None:
168 if fileid is not None:
167 self._fileid = fileid
169 self._fileid = fileid
168
170
169 def __getattr__(self, name):
171 def __getattr__(self, name):
170 if name == '_changectx':
172 if name == '_changectx':
171 self._changectx = changectx(self._repo, self._changeid)
173 self._changectx = changectx(self._repo, self._changeid)
172 return self._changectx
174 return self._changectx
173 elif name == '_filelog':
175 elif name == '_filelog':
174 self._filelog = self._repo.file(self._path)
176 self._filelog = self._repo.file(self._path)
175 return self._filelog
177 return self._filelog
176 elif name == '_changeid':
178 elif name == '_changeid':
177 if '_changectx' in self.__dict__:
179 if '_changectx' in self.__dict__:
178 self._changeid = self._changectx.rev()
180 self._changeid = self._changectx.rev()
179 else:
181 else:
180 self._changeid = self._filelog.linkrev(self._filenode)
182 self._changeid = self._filelog.linkrev(self._filenode)
181 return self._changeid
183 return self._changeid
182 elif name == '_filenode':
184 elif name == '_filenode':
183 if '_fileid' in self.__dict__:
185 if '_fileid' in self.__dict__:
184 self._filenode = self._filelog.lookup(self._fileid)
186 self._filenode = self._filelog.lookup(self._fileid)
185 else:
187 else:
186 self._filenode = self._changectx.filenode(self._path)
188 self._filenode = self._changectx.filenode(self._path)
187 return self._filenode
189 return self._filenode
188 elif name == '_filerev':
190 elif name == '_filerev':
189 self._filerev = self._filelog.rev(self._filenode)
191 self._filerev = self._filelog.rev(self._filenode)
190 return self._filerev
192 return self._filerev
191 else:
193 else:
192 raise AttributeError, name
194 raise AttributeError, name
193
195
194 def __nonzero__(self):
196 def __nonzero__(self):
195 try:
197 try:
196 n = self._filenode
198 n = self._filenode
197 return True
199 return True
198 except revlog.LookupError:
200 except revlog.LookupError:
199 # file is missing
201 # file is missing
200 return False
202 return False
201
203
202 def __str__(self):
204 def __str__(self):
203 return "%s@%s" % (self.path(), short(self.node()))
205 return "%s@%s" % (self.path(), short(self.node()))
204
206
205 def __repr__(self):
207 def __repr__(self):
206 return "<filectx %s>" % str(self)
208 return "<filectx %s>" % str(self)
207
209
208 def __eq__(self, other):
210 def __eq__(self, other):
209 try:
211 try:
210 return (self._path == other._path
212 return (self._path == other._path
211 and self._fileid == other._fileid)
213 and self._fileid == other._fileid)
212 except AttributeError:
214 except AttributeError:
213 return False
215 return False
214
216
215 def __ne__(self, other):
217 def __ne__(self, other):
216 return not (self == other)
218 return not (self == other)
217
219
218 def filectx(self, fileid):
220 def filectx(self, fileid):
219 '''opens an arbitrary revision of the file without
221 '''opens an arbitrary revision of the file without
220 opening a new filelog'''
222 opening a new filelog'''
221 return filectx(self._repo, self._path, fileid=fileid,
223 return filectx(self._repo, self._path, fileid=fileid,
222 filelog=self._filelog)
224 filelog=self._filelog)
223
225
224 def filerev(self): return self._filerev
226 def filerev(self): return self._filerev
225 def filenode(self): return self._filenode
227 def filenode(self): return self._filenode
226 def fileflags(self): return self._changectx.fileflags(self._path)
228 def fileflags(self): return self._changectx.fileflags(self._path)
227 def isexec(self): return 'x' in self.fileflags()
229 def isexec(self): return 'x' in self.fileflags()
228 def islink(self): return 'l' in self.fileflags()
230 def islink(self): return 'l' in self.fileflags()
229 def filelog(self): return self._filelog
231 def filelog(self): return self._filelog
230
232
231 def rev(self):
233 def rev(self):
232 if '_changectx' in self.__dict__:
234 if '_changectx' in self.__dict__:
233 return self._changectx.rev()
235 return self._changectx.rev()
234 if '_changeid' in self.__dict__:
236 if '_changeid' in self.__dict__:
235 return self._changectx.rev()
237 return self._changectx.rev()
236 return self._filelog.linkrev(self._filenode)
238 return self._filelog.linkrev(self._filenode)
237
239
238 def linkrev(self): return self._filelog.linkrev(self._filenode)
240 def linkrev(self): return self._filelog.linkrev(self._filenode)
239 def node(self): return self._changectx.node()
241 def node(self): return self._changectx.node()
240 def user(self): return self._changectx.user()
242 def user(self): return self._changectx.user()
241 def date(self): return self._changectx.date()
243 def date(self): return self._changectx.date()
242 def files(self): return self._changectx.files()
244 def files(self): return self._changectx.files()
243 def description(self): return self._changectx.description()
245 def description(self): return self._changectx.description()
244 def branch(self): return self._changectx.branch()
246 def branch(self): return self._changectx.branch()
245 def manifest(self): return self._changectx.manifest()
247 def manifest(self): return self._changectx.manifest()
246 def changectx(self): return self._changectx
248 def changectx(self): return self._changectx
247
249
248 def data(self): return self._filelog.read(self._filenode)
250 def data(self): return self._filelog.read(self._filenode)
249 def path(self): return self._path
251 def path(self): return self._path
250 def size(self): return self._filelog.size(self._filerev)
252 def size(self): return self._filelog.size(self._filerev)
251
253
252 def cmp(self, text): return self._filelog.cmp(self._filenode, text)
254 def cmp(self, text): return self._filelog.cmp(self._filenode, text)
253
255
254 def renamed(self):
256 def renamed(self):
255 """check if file was actually renamed in this changeset revision
257 """check if file was actually renamed in this changeset revision
256
258
257 If a rename is logged in the file revision, we report the copy for the
259 If a rename is logged in the file revision, we report the copy for the
258 changeset only if the file revision's linkrev points back to the changeset
260 changeset only if the file revision's linkrev points back to the changeset
259 in question, or if both changeset parents contain different file revisions.
261 in question, or if both changeset parents contain different file revisions.
260 """
262 """
261
263
262 renamed = self._filelog.renamed(self._filenode)
264 renamed = self._filelog.renamed(self._filenode)
263 if not renamed:
265 if not renamed:
264 return renamed
266 return renamed
265
267
266 if self.rev() == self.linkrev():
268 if self.rev() == self.linkrev():
267 return renamed
269 return renamed
268
270
269 name = self.path()
271 name = self.path()
270 fnode = self._filenode
272 fnode = self._filenode
271 for p in self._changectx.parents():
273 for p in self._changectx.parents():
272 try:
274 try:
273 if fnode == p.filenode(name):
275 if fnode == p.filenode(name):
274 return None
276 return None
275 except revlog.LookupError:
277 except revlog.LookupError:
276 pass
278 pass
277 return renamed
279 return renamed
278
280
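# Editor's note: a commented usage sketch for renamed() above (fctx is
# a hypothetical filectx; this is not part of the original source):
#
#   r = fctx.renamed()
#   if r:                      # r is the (source path, source filenode)
#       srcpath, srcnode = r   # pair recorded in the filelog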
279 def parents(self):
281 def parents(self):
280 p = self._path
282 p = self._path
281 fl = self._filelog
283 fl = self._filelog
282 pl = [(p, n, fl) for n in self._filelog.parents(self._filenode)]
284 pl = [(p, n, fl) for n in self._filelog.parents(self._filenode)]
283
285
284 r = self._filelog.renamed(self._filenode)
286 r = self._filelog.renamed(self._filenode)
285 if r:
287 if r:
286 pl[0] = (r[0], r[1], None)
288 pl[0] = (r[0], r[1], None)
287
289
288 return [filectx(self._repo, p, fileid=n, filelog=l)
290 return [filectx(self._repo, p, fileid=n, filelog=l)
289 for p,n,l in pl if n != nullid]
291 for p,n,l in pl if n != nullid]
290
292
291 def children(self):
293 def children(self):
292 # hard for renames
294 # hard for renames
293 c = self._filelog.children(self._filenode)
295 c = self._filelog.children(self._filenode)
294 return [filectx(self._repo, self._path, fileid=x,
296 return [filectx(self._repo, self._path, fileid=x,
295 filelog=self._filelog) for x in c]
297 filelog=self._filelog) for x in c]
296
298
297 def annotate(self, follow=False, linenumber=None):
299 def annotate(self, follow=False, linenumber=None):
298 '''returns a list of tuples of (ctx, line) for each line
300 '''returns a list of tuples of (ctx, line) for each line
299 in the file, where ctx is the filectx of the node where
301 in the file, where ctx is the filectx of the node where
300 that line was last changed.
302 that line was last changed.
301 If the "linenumber" parameter is not None, tuples of
303 If the "linenumber" parameter is not None, tuples of
302 ((ctx, linenumber), line) are returned instead, where
304 ((ctx, linenumber), line) are returned instead, where
303 linenumber is the position the line had when it first
305 linenumber is the position the line had when it first
304 appeared in the file.
306 appeared in the file.
305 To reduce annotation cost, the fixed value False is used
307 To reduce annotation cost, the fixed value False is used
306 as the linenumber for every line
308 as the linenumber for every line
307 if the "linenumber" parameter is False.'''
309 if the "linenumber" parameter is False.'''
308
310
309 def decorate_compat(text, rev):
311 def decorate_compat(text, rev):
310 return ([rev] * len(text.splitlines()), text)
312 return ([rev] * len(text.splitlines()), text)
311
313
312 def without_linenumber(text, rev):
314 def without_linenumber(text, rev):
313 return ([(rev, False)] * len(text.splitlines()), text)
315 return ([(rev, False)] * len(text.splitlines()), text)
314
316
315 def with_linenumber(text, rev):
317 def with_linenumber(text, rev):
316 size = len(text.splitlines())
318 size = len(text.splitlines())
317 return ([(rev, i) for i in xrange(1, size + 1)], text)
319 return ([(rev, i) for i in xrange(1, size + 1)], text)
318
320
319 decorate = (((linenumber is None) and decorate_compat) or
321 decorate = (((linenumber is None) and decorate_compat) or
320 (linenumber and with_linenumber) or
322 (linenumber and with_linenumber) or
321 without_linenumber)
323 without_linenumber)
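# (editor's note: the and/or chain above is the pre-2.5 Python idiom for
#  a conditional expression: it selects decorate_compat when linenumber
#  is None, with_linenumber when it is true, else without_linenumber)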
322
324
323 def pair(parent, child):
325 def pair(parent, child):
324 for a1, a2, b1, b2 in bdiff.blocks(parent[1], child[1]):
326 for a1, a2, b1, b2 in bdiff.blocks(parent[1], child[1]):
325 child[0][b1:b2] = parent[0][a1:a2]
327 child[0][b1:b2] = parent[0][a1:a2]
326 return child
328 return child
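# (editor's note: bdiff.blocks yields the regions where parent and child
#  texts match; copying the parent's annotations across those regions
#  leaves child annotations only on the lines this revision introduced)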
327
329
328 getlog = util.cachefunc(lambda x: self._repo.file(x))
330 getlog = util.cachefunc(lambda x: self._repo.file(x))
329 def getctx(path, fileid):
331 def getctx(path, fileid):
330 log = path == self._path and self._filelog or getlog(path)
332 log = path == self._path and self._filelog or getlog(path)
331 return filectx(self._repo, path, fileid=fileid, filelog=log)
333 return filectx(self._repo, path, fileid=fileid, filelog=log)
332 getctx = util.cachefunc(getctx)
334 getctx = util.cachefunc(getctx)
333
335
334 def parents(f):
336 def parents(f):
335 # we want to reuse filectx objects as much as possible
337 # we want to reuse filectx objects as much as possible
336 p = f._path
338 p = f._path
337 if f._filerev is None: # working dir
339 if f._filerev is None: # working dir
338 pl = [(n.path(), n.filerev()) for n in f.parents()]
340 pl = [(n.path(), n.filerev()) for n in f.parents()]
339 else:
341 else:
340 pl = [(p, n) for n in f._filelog.parentrevs(f._filerev)]
342 pl = [(p, n) for n in f._filelog.parentrevs(f._filerev)]
341
343
342 if follow:
344 if follow:
343 r = f.renamed()
345 r = f.renamed()
344 if r:
346 if r:
345 pl[0] = (r[0], getlog(r[0]).rev(r[1]))
347 pl[0] = (r[0], getlog(r[0]).rev(r[1]))
346
348
347 return [getctx(p, n) for p, n in pl if n != nullrev]
349 return [getctx(p, n) for p, n in pl if n != nullrev]
348
350
349 # use linkrev to find the first changeset where self appeared
351 # use linkrev to find the first changeset where self appeared
350 if self.rev() != self.linkrev():
352 if self.rev() != self.linkrev():
351 base = self.filectx(self.filerev())
353 base = self.filectx(self.filerev())
352 else:
354 else:
353 base = self
355 base = self
354
356
355 # find all ancestors
357 # find all ancestors
356 needed = {base: 1}
358 needed = {base: 1}
357 visit = [base]
359 visit = [base]
358 files = [base._path]
360 files = [base._path]
359 while visit:
361 while visit:
360 f = visit.pop(0)
362 f = visit.pop(0)
361 for p in parents(f):
363 for p in parents(f):
362 if p not in needed:
364 if p not in needed:
363 needed[p] = 1
365 needed[p] = 1
364 visit.append(p)
366 visit.append(p)
365 if p._path not in files:
367 if p._path not in files:
366 files.append(p._path)
368 files.append(p._path)
367 else:
369 else:
368 # count how many times we'll use this
370 # count how many times we'll use this
369 needed[p] += 1
371 needed[p] += 1
370
372
371 # sort by revision (per file), which is a topological order
373 # sort by revision (per file), which is a topological order
372 visit = []
374 visit = []
373 for f in files:
375 for f in files:
374 fn = [(n.rev(), n) for n in needed.keys() if n._path == f]
376 fn = [(n.rev(), n) for n in needed.keys() if n._path == f]
375 visit.extend(fn)
377 visit.extend(fn)
376 visit.sort()
378 visit.sort()
377 hist = {}
379 hist = {}
378
380
379 for r, f in visit:
381 for r, f in visit:
380 curr = decorate(f.data(), f)
382 curr = decorate(f.data(), f)
381 for p in parents(f):
383 for p in parents(f):
382 if p != nullid:
384 if p != nullid:
383 curr = pair(hist[p], curr)
385 curr = pair(hist[p], curr)
384 # trim the history of unneeded revs
386 # trim the history of unneeded revs
385 needed[p] -= 1
387 needed[p] -= 1
386 if not needed[p]:
388 if not needed[p]:
387 del hist[p]
389 del hist[p]
388 hist[f] = curr
390 hist[f] = curr
389
391
390 return zip(hist[f][0], hist[f][1].splitlines(1))
392 return zip(hist[f][0], hist[f][1].splitlines(1))
391
393
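# Editor's note: a commented usage sketch for annotate() above (fctx is
# a hypothetical filectx; not part of the original source):
#
#   for (fc, lineno), line in fctx.annotate(linenumber=True):
#       # fc.rev() is the revision that introduced `line`; lineno is
#       # the position the line had when it first appeared
#       pass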
392 def ancestor(self, fc2):
394 def ancestor(self, fc2):
393 """
395 """
394 find the common ancestor file context, if any, of self and fc2
396 find the common ancestor file context, if any, of self and fc2
395 """
397 """
396
398
397 acache = {}
399 acache = {}
398
400
399 # prime the ancestor cache for the working directory
401 # prime the ancestor cache for the working directory
400 for c in (self, fc2):
402 for c in (self, fc2):
401 if c._filerev == None:
403 if c._filerev == None:
402 pl = [(n.path(), n.filenode()) for n in c.parents()]
404 pl = [(n.path(), n.filenode()) for n in c.parents()]
403 acache[(c._path, None)] = pl
405 acache[(c._path, None)] = pl
404
406
405 flcache = {self._path:self._filelog, fc2._path:fc2._filelog}
407 flcache = {self._path:self._filelog, fc2._path:fc2._filelog}
406 def parents(vertex):
408 def parents(vertex):
407 if vertex in acache:
409 if vertex in acache:
408 return acache[vertex]
410 return acache[vertex]
409 f, n = vertex
411 f, n = vertex
410 if f not in flcache:
412 if f not in flcache:
411 flcache[f] = self._repo.file(f)
413 flcache[f] = self._repo.file(f)
412 fl = flcache[f]
414 fl = flcache[f]
413 pl = [(f, p) for p in fl.parents(n) if p != nullid]
415 pl = [(f, p) for p in fl.parents(n) if p != nullid]
414 re = fl.renamed(n)
416 re = fl.renamed(n)
415 if re:
417 if re:
416 pl.append(re)
418 pl.append(re)
417 acache[vertex] = pl
419 acache[vertex] = pl
418 return pl
420 return pl
419
421
420 a, b = (self._path, self._filenode), (fc2._path, fc2._filenode)
422 a, b = (self._path, self._filenode), (fc2._path, fc2._filenode)
421 v = ancestor.ancestor(a, b, parents)
423 v = ancestor.ancestor(a, b, parents)
422 if v:
424 if v:
423 f, n = v
425 f, n = v
424 return filectx(self._repo, f, fileid=n, filelog=flcache[f])
426 return filectx(self._repo, f, fileid=n, filelog=flcache[f])
425
427
426 return None
428 return None
427
429
428 class workingctx(changectx):
430 class workingctx(changectx):
429 """A workingctx object makes access to data related to
431 """A workingctx object makes access to data related to
430 the current working directory convenient."""
432 the current working directory convenient."""
431 def __init__(self, repo):
433 def __init__(self, repo):
432 self._repo = repo
434 self._repo = repo
433 self._rev = None
435 self._rev = None
434 self._node = None
436 self._node = None
435
437
436 def __str__(self):
438 def __str__(self):
437 return str(self._parents[0]) + "+"
439 return str(self._parents[0]) + "+"
438
440
439 def __nonzero__(self):
441 def __nonzero__(self):
440 return True
442 return True
441
443
442 def __getattr__(self, name):
444 def __getattr__(self, name):
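# compute expensive attributes lazily on first access; once stored in
# __dict__ they are found directly and __getattr__ is not called again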
443 if name == '_parents':
445 if name == '_parents':
444 self._parents = self._repo.parents()
446 self._parents = self._repo.parents()
445 return self._parents
447 return self._parents
446 if name == '_status':
448 if name == '_status':
447 self._status = self._repo.status()
449 self._status = self._repo.status()
448 return self._status
450 return self._status
449 if name == '_manifest':
451 if name == '_manifest':
450 self._buildmanifest()
452 self._buildmanifest()
451 return self._manifest
453 return self._manifest
452 else:
454 else:
453 raise AttributeError, name
455 raise AttributeError, name
454
456
455 def _buildmanifest(self):
457 def _buildmanifest(self):
456 """generate a manifest corresponding to the working directory"""
458 """generate a manifest corresponding to the working directory"""
457
459
458 man = self._parents[0].manifest().copy()
460 man = self._parents[0].manifest().copy()
459 copied = self._repo.dirstate.copies()
461 copied = self._repo.dirstate.copies()
460 is_exec = util.execfunc(self._repo.root,
462 is_exec = util.execfunc(self._repo.root,
461 lambda p: man.execf(copied.get(p,p)))
463 lambda p: man.execf(copied.get(p,p)))
462 is_link = util.linkfunc(self._repo.root,
464 is_link = util.linkfunc(self._repo.root,
463 lambda p: man.linkf(copied.get(p,p)))
465 lambda p: man.linkf(copied.get(p,p)))
464 modified, added, removed, deleted, unknown = self._status[:5]
466 modified, added, removed, deleted, unknown = self._status[:5]
465 for i, l in (("a", added), ("m", modified), ("u", unknown)):
467 for i, l in (("a", added), ("m", modified), ("u", unknown)):
466 for f in l:
468 for f in l:
467 man[f] = man.get(copied.get(f, f), nullid) + i
469 man[f] = man.get(copied.get(f, f), nullid) + i
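# (editor's note: the status letter 'a'/'m'/'u' is appended to the
#  parent manifest's node, so the entry encodes both the content
#  source and the file's working-directory state)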
468 try:
470 try:
469 man.set(f, is_exec(f), is_link(f))
471 man.set(f, is_exec(f), is_link(f))
470 except OSError:
472 except OSError:
471 pass
473 pass
472
474
473 for f in deleted + removed:
475 for f in deleted + removed:
474 if f in man:
476 if f in man:
475 del man[f]
477 del man[f]
476
478
477 self._manifest = man
479 self._manifest = man
478
480
479 def manifest(self): return self._manifest
481 def manifest(self): return self._manifest
480
482
481 def user(self): return self._repo.ui.username()
483 def user(self): return self._repo.ui.username()
482 def date(self): return util.makedate()
484 def date(self): return util.makedate()
483 def description(self): return ""
485 def description(self): return ""
484 def files(self):
486 def files(self):
485 f = self.modified() + self.added() + self.removed()
487 f = self.modified() + self.added() + self.removed()
486 f.sort()
488 f.sort()
487 return f
489 return f
488
490
489 def modified(self): return self._status[0]
491 def modified(self): return self._status[0]
490 def added(self): return self._status[1]
492 def added(self): return self._status[1]
491 def removed(self): return self._status[2]
493 def removed(self): return self._status[2]
492 def deleted(self): return self._status[3]
494 def deleted(self): return self._status[3]
493 def unknown(self): return self._status[4]
495 def unknown(self): return self._status[4]
494 def clean(self): return self._status[5]
496 def clean(self): return self._status[5]
495 def branch(self): return self._repo.dirstate.branch()
497 def branch(self): return self._repo.dirstate.branch()
496
498
497 def tags(self):
499 def tags(self):
498 t = []
500 t = []
499 for p in self.parents(): t.extend(p.tags())
501 for p in self.parents(): t.extend(p.tags())
500 return t
502 return t
501
503
502 def parents(self):
504 def parents(self):
503 """return contexts for each parent changeset"""
505 """return contexts for each parent changeset"""
504 return self._parents
506 return self._parents
505
507
506 def children(self):
508 def children(self):
507 return []
509 return []
508
510
509 def fileflags(self, path):
511 def fileflags(self, path):
510 if '_manifest' in self.__dict__:
512 if '_manifest' in self.__dict__:
511 try:
513 try:
512 return self._manifest.flags(path)
514 return self._manifest.flags(path)
513 except KeyError:
515 except KeyError:
514 return ''
516 return ''
515
517
516 pnode = self._parents[0].changeset()[0]
518 pnode = self._parents[0].changeset()[0]
517 orig = self._repo.dirstate.copies().get(path, path)
519 orig = self._repo.dirstate.copies().get(path, path)
518 node, flag = self._repo.manifest.find(pnode, orig)
520 node, flag = self._repo.manifest.find(pnode, orig)
519 is_link = util.linkfunc(self._repo.root, lambda p: 'l' in flag)
521 is_link = util.linkfunc(self._repo.root, lambda p: 'l' in flag)
520 is_exec = util.execfunc(self._repo.root, lambda p: 'x' in flag)
522 is_exec = util.execfunc(self._repo.root, lambda p: 'x' in flag)
521 try:
523 try:
522 return (is_link(path) and 'l' or '') + (is_exec(path) and 'x' or '')
524 return (is_link(path) and 'l' or '') + (is_exec(path) and 'x' or '')
523 except OSError:
525 except OSError:
524 pass
526 pass
525
527
526 if not node or path in self.deleted() or path in self.removed():
528 if not node or path in self.deleted() or path in self.removed():
527 return ''
529 return ''
528 return flag
530 return flag
529
531
530 def filectx(self, path, filelog=None):
532 def filectx(self, path, filelog=None):
531 """get a file context from the working directory"""
533 """get a file context from the working directory"""
532 return workingfilectx(self._repo, path, workingctx=self,
534 return workingfilectx(self._repo, path, workingctx=self,
533 filelog=filelog)
535 filelog=filelog)
534
536
535 def ancestor(self, c2):
537 def ancestor(self, c2):
536 """return the ancestor context of self and c2"""
538 """return the ancestor context of self and c2"""
537 return self._parents[0].ancestor(c2) # punt on two parents for now
539 return self._parents[0].ancestor(c2) # punt on two parents for now
538
540
539 class workingfilectx(filectx):
541 class workingfilectx(filectx):
540 """A workingfilectx object makes access to data related to a particular
542 """A workingfilectx object makes access to data related to a particular
541 file in the working directory convenient."""
543 file in the working directory convenient."""
542 def __init__(self, repo, path, filelog=None, workingctx=None):
544 def __init__(self, repo, path, filelog=None, workingctx=None):
543 """changeid can be a changeset revision, node, or tag.
545 """changeid can be a changeset revision, node, or tag.
544 fileid can be a file revision or node."""
546 fileid can be a file revision or node."""
545 self._repo = repo
547 self._repo = repo
546 self._path = path
548 self._path = path
547 self._changeid = None
549 self._changeid = None
548 self._filerev = self._filenode = None
550 self._filerev = self._filenode = None
549
551
550 if filelog:
552 if filelog:
551 self._filelog = filelog
553 self._filelog = filelog
552 if workingctx:
554 if workingctx:
553 self._changectx = workingctx
555 self._changectx = workingctx
554
556
555 def __getattr__(self, name):
557 def __getattr__(self, name):
556 if name == '_changectx':
558 if name == '_changectx':
557 self._changectx = workingctx(self._repo)
559 self._changectx = workingctx(self._repo)
558 return self._changectx
560 return self._changectx
559 elif name == '_repopath':
561 elif name == '_repopath':
560 self._repopath = (self._repo.dirstate.copied(self._path)
562 self._repopath = (self._repo.dirstate.copied(self._path)
561 or self._path)
563 or self._path)
562 return self._repopath
564 return self._repopath
563 elif name == '_filelog':
565 elif name == '_filelog':
564 self._filelog = self._repo.file(self._repopath)
566 self._filelog = self._repo.file(self._repopath)
565 return self._filelog
567 return self._filelog
566 else:
568 else:
567 raise AttributeError, name
569 raise AttributeError, name
568
570
569 def __nonzero__(self):
571 def __nonzero__(self):
570 return True
572 return True
571
573
572 def __str__(self):
574 def __str__(self):
573 return "%s@%s" % (self.path(), self._changectx)
575 return "%s@%s" % (self.path(), self._changectx)
574
576
575 def filectx(self, fileid):
577 def filectx(self, fileid):
576 '''opens an arbitrary revision of the file without
578 '''opens an arbitrary revision of the file without
577 opening a new filelog'''
579 opening a new filelog'''
578 return filectx(self._repo, self._repopath, fileid=fileid,
580 return filectx(self._repo, self._repopath, fileid=fileid,
579 filelog=self._filelog)
581 filelog=self._filelog)
580
582
581 def rev(self):
583 def rev(self):
582 if '_changectx' in self.__dict__:
584 if '_changectx' in self.__dict__:
583 return self._changectx.rev()
585 return self._changectx.rev()
584 return self._filelog.linkrev(self._filenode)
586 return self._filelog.linkrev(self._filenode)
585
587
586 def data(self): return self._repo.wread(self._path)
588 def data(self): return self._repo.wread(self._path)
587 def renamed(self):
589 def renamed(self):
588 rp = self._repopath
590 rp = self._repopath
589 if rp == self._path:
591 if rp == self._path:
590 return None
592 return None
591 return rp, self._changectx._parents[0]._manifest.get(rp, nullid)
593 return rp, self._changectx._parents[0]._manifest.get(rp, nullid)
592
594
593 def parents(self):
595 def parents(self):
594 '''return parent filectxs, following copies if necessary'''
596 '''return parent filectxs, following copies if necessary'''
595 p = self._path
597 p = self._path
596 rp = self._repopath
598 rp = self._repopath
597 pcl = self._changectx._parents
599 pcl = self._changectx._parents
598 fl = self._filelog
600 fl = self._filelog
599 pl = [(rp, pcl[0]._manifest.get(rp, nullid), fl)]
601 pl = [(rp, pcl[0]._manifest.get(rp, nullid), fl)]
600 if len(pcl) > 1:
602 if len(pcl) > 1:
601 if rp != p:
603 if rp != p:
602 fl = None
604 fl = None
603 pl.append((p, pcl[1]._manifest.get(p, nullid), fl))
605 pl.append((p, pcl[1]._manifest.get(p, nullid), fl))
604
606
605 return [filectx(self._repo, p, fileid=n, filelog=l)
607 return [filectx(self._repo, p, fileid=n, filelog=l)
606 for p,n,l in pl if n != nullid]
608 for p,n,l in pl if n != nullid]
607
609
608 def children(self):
610 def children(self):
609 return []
611 return []
610
612
611 def size(self): return os.stat(self._repo.wjoin(self._path)).st_size
613 def size(self): return os.stat(self._repo.wjoin(self._path)).st_size
612 def date(self):
614 def date(self):
613 t, tz = self._changectx.date()
615 t, tz = self._changectx.date()
614 try:
616 try:
615 return (int(os.lstat(self._repo.wjoin(self._path)).st_mtime), tz)
617 return (int(os.lstat(self._repo.wjoin(self._path)).st_mtime), tz)
616 except OSError, err:
618 except OSError, err:
617 if err.errno != errno.ENOENT: raise
619 if err.errno != errno.ENOENT: raise
618 return (t, tz)
620 return (t, tz)
619
621
620 def cmp(self, text): return self._repo.wread(self._path) == text
622 def cmp(self, text): return self._repo.wread(self._path) == text
@@ -1,1319 +1,1319 b''
1 """
1 """
2 revlog.py - storage back-end for mercurial
2 revlog.py - storage back-end for mercurial
3
3
4 This provides efficient delta storage with O(1) retrieve and append
4 This provides efficient delta storage with O(1) retrieve and append
5 and O(changes) merge between branches
5 and O(changes) merge between branches
6
6
7 Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
7 Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
8
8
9 This software may be used and distributed according to the terms
9 This software may be used and distributed according to the terms
10 of the GNU General Public License, incorporated herein by reference.
10 of the GNU General Public License, incorporated herein by reference.
11 """
11 """
12
12
13 from node import bin, hex, nullid, nullrev, short
13 from node import bin, hex, nullid, nullrev, short
14 from i18n import _
14 from i18n import _
15 import changegroup, errno, ancestor, mdiff
15 import changegroup, errno, ancestor, mdiff
16 import sha, struct, util, zlib
16 import sha, struct, util, zlib
17
17
18 _pack = struct.pack
18 _pack = struct.pack
19 _unpack = struct.unpack
19 _unpack = struct.unpack
20 _compress = zlib.compress
20 _compress = zlib.compress
21 _decompress = zlib.decompress
21 _decompress = zlib.decompress
22 _sha = sha.new
22 _sha = sha.new
23
23
24 # revlog flags
24 # revlog flags
25 REVLOGV0 = 0
25 REVLOGV0 = 0
26 REVLOGNG = 1
26 REVLOGNG = 1
27 REVLOGNGINLINEDATA = (1 << 16)
27 REVLOGNGINLINEDATA = (1 << 16)
28 REVLOG_DEFAULT_FLAGS = REVLOGNGINLINEDATA
28 REVLOG_DEFAULT_FLAGS = REVLOGNGINLINEDATA
29 REVLOG_DEFAULT_FORMAT = REVLOGNG
29 REVLOG_DEFAULT_FORMAT = REVLOGNG
30 REVLOG_DEFAULT_VERSION = REVLOG_DEFAULT_FORMAT | REVLOG_DEFAULT_FLAGS
30 REVLOG_DEFAULT_VERSION = REVLOG_DEFAULT_FORMAT | REVLOG_DEFAULT_FLAGS
31
31
32 class RevlogError(Exception):
32 class RevlogError(Exception):
33 pass
33 pass
34
34
35 class LookupError(RevlogError):
35 class LookupError(RevlogError):
36 def __init__(self, name, message=None):
36 def __init__(self, name, index, message):
37 if message is None:
38 message = _('not found: %s') % name
39 RevlogError.__init__(self, message)
40 self.name = name
37 self.name = name
38 if isinstance(name, str) and len(name) == 20:
39 name = short(name)
40 RevlogError.__init__(self, _('%s@%s: %s') % (index, name, message))
41
41
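# Editor's sketch (not part of the changeset): how the new three-argument
# LookupError above formats its message. The node and file name below are
# hypothetical; a 20-byte binary node is abbreviated via short().
def _lookuperror_example():
    node = '\x12' * 20
    try:
        raise LookupError(node, 'data/foo.i', _('no node'))
    except LookupError, err:
        assert err.name == node   # the original name is preserved
        return str(err)           # reads like 'data/foo.i@121212121212: no node'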
42 def getoffset(q):
42 def getoffset(q):
43 return int(q >> 16)
43 return int(q >> 16)
44
44
45 def gettype(q):
45 def gettype(q):
46 return int(q & 0xFFFF)
46 return int(q & 0xFFFF)
47
47
48 def offset_type(offset, type):
48 def offset_type(offset, type):
49 return long(long(offset) << 16 | type)
49 return long(long(offset) << 16 | type)
50
50
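# Editor's sketch (illustrative values): the first index field packs a
# 48-bit data-file offset and 16 bits of type flags into one integer;
# getoffset/gettype recover the two halves.
def _offset_type_example():
    q = offset_type(12345, 1)
    assert getoffset(q) == 12345
    assert gettype(q) == 1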
51 def hash(text, p1, p2):
51 def hash(text, p1, p2):
52 """generate a hash from the given text and its parent hashes
52 """generate a hash from the given text and its parent hashes
53
53
54 This hash combines both the current file contents and its history
54 This hash combines both the current file contents and its history
55 in a manner that makes it easy to distinguish nodes with the same
55 in a manner that makes it easy to distinguish nodes with the same
56 content in the revision graph.
56 content in the revision graph.
57 """
57 """
58 l = [p1, p2]
58 l = [p1, p2]
59 l.sort()
59 l.sort()
60 s = _sha(l[0])
60 s = _sha(l[0])
61 s.update(l[1])
61 s.update(l[1])
62 s.update(text)
62 s.update(text)
63 return s.digest()
63 return s.digest()
64
64
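# Editor's sketch (not part of the original source): the parent hashes
# are sorted before digesting, so hash() does not depend on parent
# order; nullid stands in for a missing parent.
def _hash_example():
    p1, p2 = _sha('a').digest(), _sha('b').digest()
    assert hash('some text', p1, p2) == hash('some text', p2, p1)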
65 def compress(text):
65 def compress(text):
66 """ generate a possibly-compressed representation of text """
66 """ generate a possibly-compressed representation of text """
67 if not text:
67 if not text:
68 return ("", text)
68 return ("", text)
69 l = len(text)
69 l = len(text)
70 bin = None
70 bin = None
71 if l < 44:
71 if l < 44:
72 pass
72 pass
73 elif l > 1000000:
73 elif l > 1000000:
74 # zlib makes an internal copy, thus doubling memory usage for
74 # zlib makes an internal copy, thus doubling memory usage for
75 # large files, so let's do this in pieces
75 # large files, so let's do this in pieces
76 z = zlib.compressobj()
76 z = zlib.compressobj()
77 p = []
77 p = []
78 pos = 0
78 pos = 0
79 while pos < l:
79 while pos < l:
80 pos2 = pos + 2**20
80 pos2 = pos + 2**20
81 p.append(z.compress(text[pos:pos2]))
81 p.append(z.compress(text[pos:pos2]))
82 pos = pos2
82 pos = pos2
83 p.append(z.flush())
83 p.append(z.flush())
84 if sum(map(len, p)) < l:
84 if sum(map(len, p)) < l:
85 bin = "".join(p)
85 bin = "".join(p)
86 else:
86 else:
87 bin = _compress(text)
87 bin = _compress(text)
88 if bin is None or len(bin) > l:
88 if bin is None or len(bin) > l:
89 if text[0] == '\0':
89 if text[0] == '\0':
90 return ("", text)
90 return ("", text)
91 return ('u', text)
91 return ('u', text)
92 return ("", bin)
92 return ("", bin)
93
93
94 def decompress(bin):
94 def decompress(bin):
95 """ decompress the given input """
95 """ decompress the given input """
96 if not bin:
96 if not bin:
97 return bin
97 return bin
98 t = bin[0]
98 t = bin[0]
99 if t == '\0':
99 if t == '\0':
100 return bin
100 return bin
101 if t == 'x':
101 if t == 'x':
102 return _decompress(bin)
102 return _decompress(bin)
103 if t == 'u':
103 if t == 'u':
104 return bin[1:]
104 return bin[1:]
105 raise RevlogError(_("unknown compression type %r") % t)
105 raise RevlogError(_("unknown compression type %r") % t)
106
106
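# Editor's sketch of the tagging scheme above: zlib output (which starts
# with 'x') and plain text starting with '\0' are stored untagged, while
# incompressible text gets a 'u' marker that decompress() strips again.
def _compress_roundtrip_example():
    for text in ('', 'short text', 'x' * 100000):
        t, data = compress(text)
        assert decompress(t + data) == text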
107 class lazyparser(object):
107 class lazyparser(object):
108 """
108 """
109 this class avoids the need to parse the entirety of large indices
109 this class avoids the need to parse the entirety of large indices
110 """
110 """
111
111
112 # lazyparser is not safe to use on windows if the win32 extensions
112 # lazyparser is not safe to use on windows if the win32 extensions
113 # are not available. it keeps the file handle open, which makes it
113 # are not available. it keeps the file handle open, which makes it
114 # impossible to break hardlinks on local cloned repos.
114 # impossible to break hardlinks on local cloned repos.
115
115
116 def __init__(self, dataf, size):
116 def __init__(self, dataf, size):
117 self.dataf = dataf
117 self.dataf = dataf
118 self.s = struct.calcsize(indexformatng)
118 self.s = struct.calcsize(indexformatng)
119 self.datasize = size
119 self.datasize = size
120 self.l = size/self.s
120 self.l = size/self.s
121 self.index = [None] * self.l
121 self.index = [None] * self.l
122 self.map = {nullid: nullrev}
122 self.map = {nullid: nullrev}
123 self.allmap = 0
123 self.allmap = 0
124 self.all = 0
124 self.all = 0
125 self.mapfind_count = 0
125 self.mapfind_count = 0
126
126
127 def loadmap(self):
127 def loadmap(self):
128 """
128 """
129 During a commit, we need to make sure the rev being added is
129 During a commit, we need to make sure the rev being added is
130 not a duplicate. This requires loading the entire index,
130 not a duplicate. This requires loading the entire index,
131 which is fairly slow. loadmap can load just the node map,
131 which is fairly slow. loadmap can load just the node map,
132 which takes much less time.
132 which takes much less time.
133 """
133 """
134 if self.allmap:
134 if self.allmap:
135 return
135 return
136 end = self.datasize
136 end = self.datasize
137 self.allmap = 1
137 self.allmap = 1
138 cur = 0
138 cur = 0
139 count = 0
139 count = 0
140 blocksize = self.s * 256
140 blocksize = self.s * 256
141 self.dataf.seek(0)
141 self.dataf.seek(0)
142 while cur < end:
142 while cur < end:
143 data = self.dataf.read(blocksize)
143 data = self.dataf.read(blocksize)
144 off = 0
144 off = 0
145 for x in xrange(256):
145 for x in xrange(256):
146 n = data[off + ngshaoffset:off + ngshaoffset + 20]
146 n = data[off + ngshaoffset:off + ngshaoffset + 20]
147 self.map[n] = count
147 self.map[n] = count
148 count += 1
148 count += 1
149 if count >= self.l:
149 if count >= self.l:
150 break
150 break
151 off += self.s
151 off += self.s
152 cur += blocksize
152 cur += blocksize
153
153
154 def loadblock(self, blockstart, blocksize, data=None):
154 def loadblock(self, blockstart, blocksize, data=None):
155 if self.all:
155 if self.all:
156 return
156 return
157 if data is None:
157 if data is None:
158 self.dataf.seek(blockstart)
158 self.dataf.seek(blockstart)
159 if blockstart + blocksize > self.datasize:
159 if blockstart + blocksize > self.datasize:
160 # the revlog may have grown since we started running,
160 # the revlog may have grown since we started running,
161 # but we don't have space in self.index for more entries.
161 # but we don't have space in self.index for more entries.
162 # limit blocksize so that we don't get too much data.
162 # limit blocksize so that we don't get too much data.
163 blocksize = max(self.datasize - blockstart, 0)
163 blocksize = max(self.datasize - blockstart, 0)
164 data = self.dataf.read(blocksize)
164 data = self.dataf.read(blocksize)
165 lend = len(data) / self.s
165 lend = len(data) / self.s
166 i = blockstart / self.s
166 i = blockstart / self.s
167 off = 0
167 off = 0
168 # lazyindex supports __delitem__
168 # lazyindex supports __delitem__
169 if lend > len(self.index) - i:
169 if lend > len(self.index) - i:
170 lend = len(self.index) - i
170 lend = len(self.index) - i
171 for x in xrange(lend):
171 for x in xrange(lend):
172 if self.index[i + x] == None:
172 if self.index[i + x] == None:
173 b = data[off : off + self.s]
173 b = data[off : off + self.s]
174 self.index[i + x] = b
174 self.index[i + x] = b
175 n = b[ngshaoffset:ngshaoffset + 20]
175 n = b[ngshaoffset:ngshaoffset + 20]
176 self.map[n] = i + x
176 self.map[n] = i + x
177 off += self.s
177 off += self.s
178
178
179 def findnode(self, node):
179 def findnode(self, node):
180 """search backwards through the index file for a specific node"""
180 """search backwards through the index file for a specific node"""
181 if self.allmap:
181 if self.allmap:
182 return None
182 return None
183
183
184 # hg log will cause many, many searches for the manifest
184 # hg log will cause many, many searches for the manifest
185 # nodes. After we get called a few times, just load the whole
185 # nodes. After we get called a few times, just load the whole
186 # thing.
186 # thing.
187 if self.mapfind_count > 8:
187 if self.mapfind_count > 8:
188 self.loadmap()
188 self.loadmap()
189 if node in self.map:
189 if node in self.map:
190 return node
190 return node
191 return None
191 return None
192 self.mapfind_count += 1
192 self.mapfind_count += 1
193 last = self.l - 1
193 last = self.l - 1
194 while self.index[last] != None:
194 while self.index[last] != None:
195 if last == 0:
195 if last == 0:
196 self.all = 1
196 self.all = 1
197 self.allmap = 1
197 self.allmap = 1
198 return None
198 return None
199 last -= 1
199 last -= 1
200 end = (last + 1) * self.s
200 end = (last + 1) * self.s
201 blocksize = self.s * 256
201 blocksize = self.s * 256
202 while end >= 0:
202 while end >= 0:
203 start = max(end - blocksize, 0)
203 start = max(end - blocksize, 0)
204 self.dataf.seek(start)
204 self.dataf.seek(start)
205 data = self.dataf.read(end - start)
205 data = self.dataf.read(end - start)
206 findend = end - start
206 findend = end - start
207 while True:
207 while True:
208 # we're searching backwards, so we have to make sure
208 # we're searching backwards, so we have to make sure
209 # we don't find a changeset where this node is a parent
209 # we don't find a changeset where this node is a parent
210 off = data.find(node, 0, findend)
210 off = data.find(node, 0, findend)
211 findend = off
211 findend = off
212 if off >= 0:
212 if off >= 0:
213 i = off / self.s
213 i = off / self.s
214 off = i * self.s
214 off = i * self.s
215 n = data[off + ngshaoffset:off + ngshaoffset + 20]
215 n = data[off + ngshaoffset:off + ngshaoffset + 20]
216 if n == node:
216 if n == node:
217 self.map[n] = i + start / self.s
217 self.map[n] = i + start / self.s
218 return node
218 return node
219 else:
219 else:
220 break
220 break
221 end -= blocksize
221 end -= blocksize
222 return None
222 return None
223
223
224 def loadindex(self, i=None, end=None):
224 def loadindex(self, i=None, end=None):
225 if self.all:
225 if self.all:
226 return
226 return
227 all = False
227 all = False
228 if i == None:
228 if i == None:
229 blockstart = 0
229 blockstart = 0
230 blocksize = (65536 / self.s) * self.s
230 blocksize = (65536 / self.s) * self.s
231 end = self.datasize
231 end = self.datasize
232 all = True
232 all = True
233 else:
233 else:
234 if end:
234 if end:
235 blockstart = i * self.s
235 blockstart = i * self.s
236 end = end * self.s
236 end = end * self.s
237 blocksize = end - blockstart
237 blocksize = end - blockstart
238 else:
238 else:
239 blockstart = (i & ~1023) * self.s
239 blockstart = (i & ~1023) * self.s
240 blocksize = self.s * 1024
240 blocksize = self.s * 1024
241 end = blockstart + blocksize
241 end = blockstart + blocksize
242 while blockstart < end:
242 while blockstart < end:
243 self.loadblock(blockstart, blocksize)
243 self.loadblock(blockstart, blocksize)
244 blockstart += blocksize
244 blockstart += blocksize
245 if all:
245 if all:
246 self.all = True
246 self.all = True
247
247
248 class lazyindex(object):
248 class lazyindex(object):
249 """a lazy version of the index array"""
249 """a lazy version of the index array"""
250 def __init__(self, parser):
250 def __init__(self, parser):
251 self.p = parser
251 self.p = parser
252 def __len__(self):
252 def __len__(self):
253 return len(self.p.index)
253 return len(self.p.index)
254 def load(self, pos):
254 def load(self, pos):
255 if pos < 0:
255 if pos < 0:
256 pos += len(self.p.index)
256 pos += len(self.p.index)
257 self.p.loadindex(pos)
257 self.p.loadindex(pos)
258 return self.p.index[pos]
258 return self.p.index[pos]
259 def __getitem__(self, pos):
259 def __getitem__(self, pos):
260 return _unpack(indexformatng, self.p.index[pos] or self.load(pos))
260 return _unpack(indexformatng, self.p.index[pos] or self.load(pos))
261 def __setitem__(self, pos, item):
261 def __setitem__(self, pos, item):
262 self.p.index[pos] = _pack(indexformatng, *item)
262 self.p.index[pos] = _pack(indexformatng, *item)
263 def __delitem__(self, pos):
263 def __delitem__(self, pos):
264 del self.p.index[pos]
264 del self.p.index[pos]
265 def insert(self, pos, e):
265 def insert(self, pos, e):
266 self.p.index.insert(pos, _pack(indexformatng, *e))
266 self.p.index.insert(pos, _pack(indexformatng, *e))
267 def append(self, e):
267 def append(self, e):
268 self.p.index.append(_pack(indexformatng, *e))
268 self.p.index.append(_pack(indexformatng, *e))
269
269
270 class lazymap(object):
270 class lazymap(object):
271 """a lazy version of the node map"""
271 """a lazy version of the node map"""
272 def __init__(self, parser):
272 def __init__(self, parser):
273 self.p = parser
273 self.p = parser
274 def load(self, key):
274 def load(self, key):
275 n = self.p.findnode(key)
275 n = self.p.findnode(key)
276 if n == None:
276 if n == None:
277 raise KeyError(key)
277 raise KeyError(key)
278 def __contains__(self, key):
278 def __contains__(self, key):
279 if key in self.p.map:
279 if key in self.p.map:
280 return True
280 return True
281 self.p.loadmap()
281 self.p.loadmap()
282 return key in self.p.map
282 return key in self.p.map
283 def __iter__(self):
283 def __iter__(self):
284 yield nullid
284 yield nullid
285 for i in xrange(self.p.l):
285 for i in xrange(self.p.l):
286 ret = self.p.index[i]
286 ret = self.p.index[i]
287 if not ret:
287 if not ret:
288 self.p.loadindex(i)
288 self.p.loadindex(i)
289 ret = self.p.index[i]
289 ret = self.p.index[i]
290 if isinstance(ret, str):
290 if isinstance(ret, str):
291 ret = _unpack(indexformatng, ret)
291 ret = _unpack(indexformatng, ret)
292 yield ret[7]
292 yield ret[7]
293 def __getitem__(self, key):
293 def __getitem__(self, key):
294 try:
294 try:
295 return self.p.map[key]
295 return self.p.map[key]
296 except KeyError:
296 except KeyError:
297 try:
297 try:
298 self.load(key)
298 self.load(key)
299 return self.p.map[key]
299 return self.p.map[key]
300 except KeyError:
300 except KeyError:
301 raise KeyError("node " + hex(key))
301 raise KeyError("node " + hex(key))
302 def __setitem__(self, key, val):
302 def __setitem__(self, key, val):
303 self.p.map[key] = val
303 self.p.map[key] = val
304 def __delitem__(self, key):
304 def __delitem__(self, key):
305 del self.p.map[key]
305 del self.p.map[key]
306
306
307 indexformatv0 = ">4l20s20s20s"
307 indexformatv0 = ">4l20s20s20s"
308 v0shaoffset = 56
308 v0shaoffset = 56
309
309
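# Editor's sketch (illustrative): a v0 index record is 76 bytes, four
# 32-bit fields followed by three 20-byte hashes (p1, p2, node), and
# v0shaoffset marks where the record's own nodeid begins.
def _indexformatv0_example():
    assert struct.calcsize(indexformatv0) == 76
    assert v0shaoffset == 76 - 20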
310 class revlogoldio(object):
310 class revlogoldio(object):
311 def __init__(self):
311 def __init__(self):
312 self.size = struct.calcsize(indexformatv0)
312 self.size = struct.calcsize(indexformatv0)
313
313
314 def parseindex(self, fp, inline):
314 def parseindex(self, fp, inline):
315 s = self.size
315 s = self.size
316 index = []
316 index = []
317 nodemap = {nullid: nullrev}
317 nodemap = {nullid: nullrev}
318 n = off = 0
318 n = off = 0
319 data = fp.read()
319 data = fp.read()
320 l = len(data)
320 l = len(data)
321 while off + s <= l:
321 while off + s <= l:
322 cur = data[off:off + s]
322 cur = data[off:off + s]
323 off += s
323 off += s
324 e = _unpack(indexformatv0, cur)
324 e = _unpack(indexformatv0, cur)
325 # transform to revlogv1 format
325 # transform to revlogv1 format
326 e2 = (offset_type(e[0], 0), e[1], -1, e[2], e[3],
326 e2 = (offset_type(e[0], 0), e[1], -1, e[2], e[3],
327 nodemap.get(e[4], nullrev), nodemap.get(e[5], nullrev), e[6])
327 nodemap.get(e[4], nullrev), nodemap.get(e[5], nullrev), e[6])
328 index.append(e2)
328 index.append(e2)
329 nodemap[e[6]] = n
329 nodemap[e[6]] = n
330 n += 1
330 n += 1
331
331
332 return index, nodemap, None
332 return index, nodemap, None
333
333
334 def packentry(self, entry, node, version, rev):
334 def packentry(self, entry, node, version, rev):
335 e2 = (getoffset(entry[0]), entry[1], entry[3], entry[4],
335 e2 = (getoffset(entry[0]), entry[1], entry[3], entry[4],
336 node(entry[5]), node(entry[6]), entry[7])
336 node(entry[5]), node(entry[6]), entry[7])
337 return _pack(indexformatv0, *e2)
337 return _pack(indexformatv0, *e2)
338
338
339 # index ng:
339 # index ng:
340 # 6 bytes offset
340 # 6 bytes offset
341 # 2 bytes flags
341 # 2 bytes flags
342 # 4 bytes compressed length
342 # 4 bytes compressed length
343 # 4 bytes uncompressed length
343 # 4 bytes uncompressed length
344 # 4 bytes: base rev
344 # 4 bytes: base rev
345 # 4 bytes link rev
345 # 4 bytes link rev
346 # 4 bytes parent 1 rev
346 # 4 bytes parent 1 rev
347 # 4 bytes parent 2 rev
347 # 4 bytes parent 2 rev
348 # 32 bytes: nodeid
348 # 32 bytes: nodeid
349 indexformatng = ">Qiiiiii20s12x"
349 indexformatng = ">Qiiiiii20s12x"
350 ngshaoffset = 32
350 ngshaoffset = 32
351 versionformat = ">I"
351 versionformat = ">I"
352
352
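# Editor's sketch (hypothetical entry values): each revlogng record is
# 64 bytes; the first field is offset_type(offset, flags), parents are
# stored as revision numbers, and the 20-byte node is padded to 32.
def _indexformatng_example():
    e = (offset_type(0, 0), 11, 11, 0, 0, -1, -1, '\x12' * 20)
    rec = _pack(indexformatng, *e)
    assert len(rec) == 64
    assert _unpack(indexformatng, rec) == e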
353 class revlogio(object):
353 class revlogio(object):
354 def __init__(self):
354 def __init__(self):
355 self.size = struct.calcsize(indexformatng)
355 self.size = struct.calcsize(indexformatng)
356
356
357 def parseindex(self, fp, inline):
357 def parseindex(self, fp, inline):
358 try:
358 try:
359 size = util.fstat(fp).st_size
359 size = util.fstat(fp).st_size
360 except AttributeError:
360 except AttributeError:
361 size = 0
361 size = 0
362
362
363 if util.openhardlinks() and not inline and size > 1000000:
363 if util.openhardlinks() and not inline and size > 1000000:
364 # big index, let's parse it on demand
364 # big index, let's parse it on demand
365 parser = lazyparser(fp, size)
365 parser = lazyparser(fp, size)
366 index = lazyindex(parser)
366 index = lazyindex(parser)
367 nodemap = lazymap(parser)
367 nodemap = lazymap(parser)
368 e = list(index[0])
368 e = list(index[0])
369 type = gettype(e[0])
369 type = gettype(e[0])
370 e[0] = offset_type(0, type)
370 e[0] = offset_type(0, type)
371 index[0] = e
371 index[0] = e
372 return index, nodemap, None
372 return index, nodemap, None
373
373
374 s = self.size
374 s = self.size
375 cache = None
375 cache = None
376 index = []
376 index = []
377 nodemap = {nullid: nullrev}
377 nodemap = {nullid: nullrev}
378 n = off = 0
378 n = off = 0
379 # if we're not using lazymap, always read the whole index
379 # if we're not using lazymap, always read the whole index
380 data = fp.read()
380 data = fp.read()
381 l = len(data) - s
381 l = len(data) - s
382 append = index.append
382 append = index.append
383 if inline:
383 if inline:
384 cache = (0, data)
384 cache = (0, data)
385 while off <= l:
385 while off <= l:
386 e = _unpack(indexformatng, data[off:off + s])
386 e = _unpack(indexformatng, data[off:off + s])
387 nodemap[e[7]] = n
387 nodemap[e[7]] = n
388 append(e)
388 append(e)
389 n += 1
389 n += 1
390 if e[1] < 0:
390 if e[1] < 0:
391 break
391 break
392 off += e[1] + s
392 off += e[1] + s
393 else:
393 else:
394 while off <= l:
394 while off <= l:
395 e = _unpack(indexformatng, data[off:off + s])
395 e = _unpack(indexformatng, data[off:off + s])
396 nodemap[e[7]] = n
396 nodemap[e[7]] = n
397 append(e)
397 append(e)
398 n += 1
398 n += 1
399 off += s
399 off += s
400
400
401 e = list(index[0])
401 e = list(index[0])
402 type = gettype(e[0])
402 type = gettype(e[0])
403 e[0] = offset_type(0, type)
403 e[0] = offset_type(0, type)
404 index[0] = e
404 index[0] = e
405
405
406 return index, nodemap, cache
406 return index, nodemap, cache
407
407
408 def packentry(self, entry, node, version, rev):
408 def packentry(self, entry, node, version, rev):
409 p = _pack(indexformatng, *entry)
409 p = _pack(indexformatng, *entry)
410 if rev == 0:
410 if rev == 0:
411 p = _pack(versionformat, version) + p[4:]
411 p = _pack(versionformat, version) + p[4:]
412 return p
412 return p
413
413
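# Editor's sketch (hypothetical values): packentry() emits a 64-byte
# record; for rev 0 it overlays the revlog version number on the first
# four bytes, which is where revlog.__init__ reads the version back.
def _packentry_example():
    io = revlogio()
    e = (offset_type(0, 0), 11, 11, 0, 0, -1, -1, '\x12' * 20)
    rec0 = io.packentry(e, None, REVLOG_DEFAULT_VERSION, 0)
    assert len(rec0) == 64
    assert rec0[:4] == _pack(versionformat, REVLOG_DEFAULT_VERSION)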
414 class revlog(object):
414 class revlog(object):
415 """
415 """
416 the underlying revision storage object
416 the underlying revision storage object
417
417
418 A revlog consists of two parts, an index and the revision data.
418 A revlog consists of two parts, an index and the revision data.
419
419
420 The index is a file with a fixed record size containing
420 The index is a file with a fixed record size containing
421 information on each revision, including its nodeid (hash), the
421 information on each revision, including its nodeid (hash), the
422 nodeids of its parents, the position and offset of its data within
422 nodeids of its parents, the position and offset of its data within
423 the data file, and the revision it's based on. Finally, each entry
423 the data file, and the revision it's based on. Finally, each entry
424 contains a linkrev entry that can serve as a pointer to external
424 contains a linkrev entry that can serve as a pointer to external
425 data.
425 data.
426
426
427 The revision data itself is a linear collection of data chunks.
427 The revision data itself is a linear collection of data chunks.
428 Each chunk represents a revision and is usually represented as a
428 Each chunk represents a revision and is usually represented as a
429 delta against the previous chunk. To bound lookup time, runs of
429 delta against the previous chunk. To bound lookup time, runs of
430 deltas are limited to about 2 times the length of the original
430 deltas are limited to about 2 times the length of the original
431 version data. This makes retrieval of a version proportional to
431 version data. This makes retrieval of a version proportional to
432 its size, or O(1) relative to the number of revisions.
432 its size, or O(1) relative to the number of revisions.
433
433
434 Both pieces of the revlog are written to in an append-only
434 Both pieces of the revlog are written to in an append-only
435 fashion, which means we never need to rewrite a file to insert or
435 fashion, which means we never need to rewrite a file to insert or
436 remove data, and can use some simple techniques to avoid the need
436 remove data, and can use some simple techniques to avoid the need
437 for locking while reading.
437 for locking while reading.
438 """
438 """
439 def __init__(self, opener, indexfile):
439 def __init__(self, opener, indexfile):
440 """
440 """
441 create a revlog object
441 create a revlog object
442
442
443 opener is a function that abstracts the file opening operation
443 opener is a function that abstracts the file opening operation
444 and can be used to implement COW semantics or the like.
444 and can be used to implement COW semantics or the like.
445 """
445 """
446 self.indexfile = indexfile
446 self.indexfile = indexfile
447 self.datafile = indexfile[:-2] + ".d"
447 self.datafile = indexfile[:-2] + ".d"
448 self.opener = opener
448 self.opener = opener
449 self._cache = None
449 self._cache = None
450 self._chunkcache = None
450 self._chunkcache = None
451 self.nodemap = {nullid: nullrev}
451 self.nodemap = {nullid: nullrev}
452 self.index = []
452 self.index = []
453
453
454 v = REVLOG_DEFAULT_VERSION
454 v = REVLOG_DEFAULT_VERSION
455 if hasattr(opener, "defversion"):
455 if hasattr(opener, "defversion"):
456 v = opener.defversion
456 v = opener.defversion
457 if v & REVLOGNG:
457 if v & REVLOGNG:
458 v |= REVLOGNGINLINEDATA
458 v |= REVLOGNGINLINEDATA
459
459
460 i = ""
460 i = ""
461 try:
461 try:
462 f = self.opener(self.indexfile)
462 f = self.opener(self.indexfile)
463 i = f.read(4)
463 i = f.read(4)
464 f.seek(0)
464 f.seek(0)
465 if len(i) > 0:
465 if len(i) > 0:
466 v = struct.unpack(versionformat, i)[0]
466 v = struct.unpack(versionformat, i)[0]
467 except IOError, inst:
467 except IOError, inst:
468 if inst.errno != errno.ENOENT:
468 if inst.errno != errno.ENOENT:
469 raise
469 raise
470
470
471 self.version = v
471 self.version = v
472 self._inline = v & REVLOGNGINLINEDATA
472 self._inline = v & REVLOGNGINLINEDATA
473 flags = v & ~0xFFFF
473 flags = v & ~0xFFFF
474 fmt = v & 0xFFFF
474 fmt = v & 0xFFFF
475 if fmt == REVLOGV0 and flags:
475 if fmt == REVLOGV0 and flags:
476 raise RevlogError(_("index %s unknown flags %#04x for format v0")
476 raise RevlogError(_("index %s unknown flags %#04x for format v0")
477 % (self.indexfile, flags >> 16))
477 % (self.indexfile, flags >> 16))
478 elif fmt == REVLOGNG and flags & ~REVLOGNGINLINEDATA:
478 elif fmt == REVLOGNG and flags & ~REVLOGNGINLINEDATA:
479 raise RevlogError(_("index %s unknown flags %#04x for revlogng")
479 raise RevlogError(_("index %s unknown flags %#04x for revlogng")
480 % (self.indexfile, flags >> 16))
480 % (self.indexfile, flags >> 16))
481 elif fmt > REVLOGNG:
481 elif fmt > REVLOGNG:
482 raise RevlogError(_("index %s unknown format %d")
482 raise RevlogError(_("index %s unknown format %d")
483 % (self.indexfile, fmt))
483 % (self.indexfile, fmt))
484
484
485 self._io = revlogio()
485 self._io = revlogio()
486 if self.version == REVLOGV0:
486 if self.version == REVLOGV0:
487 self._io = revlogoldio()
487 self._io = revlogoldio()
488 if i:
488 if i:
489 d = self._io.parseindex(f, self._inline)
489 d = self._io.parseindex(f, self._inline)
490 self.index, self.nodemap, self._chunkcache = d
490 self.index, self.nodemap, self._chunkcache = d
491
491
492 # add the magic null revision at -1
492 # add the magic null revision at -1
493 self.index.append((0, 0, 0, -1, -1, -1, -1, nullid))
493 self.index.append((0, 0, 0, -1, -1, -1, -1, nullid))
494
494
495 def _loadindex(self, start, end):
495 def _loadindex(self, start, end):
496 """load a block of indexes all at once from the lazy parser"""
496 """load a block of indexes all at once from the lazy parser"""
497 if isinstance(self.index, lazyindex):
497 if isinstance(self.index, lazyindex):
498 self.index.p.loadindex(start, end)
498 self.index.p.loadindex(start, end)
499
499
500 def _loadindexmap(self):
500 def _loadindexmap(self):
501 """loads both the map and the index from the lazy parser"""
501 """loads both the map and the index from the lazy parser"""
502 if isinstance(self.index, lazyindex):
502 if isinstance(self.index, lazyindex):
503 p = self.index.p
503 p = self.index.p
504 p.loadindex()
504 p.loadindex()
505 self.nodemap = p.map
505 self.nodemap = p.map
506
506
507 def _loadmap(self):
507 def _loadmap(self):
508 """loads the map from the lazy parser"""
508 """loads the map from the lazy parser"""
509 if isinstance(self.nodemap, lazymap):
509 if isinstance(self.nodemap, lazymap):
510 self.nodemap.p.loadmap()
510 self.nodemap.p.loadmap()
511 self.nodemap = self.nodemap.p.map
511 self.nodemap = self.nodemap.p.map
512
512
513 def tip(self):
513 def tip(self):
514 return self.node(len(self.index) - 2)
514 return self.node(len(self.index) - 2)
515 def count(self):
515 def count(self):
516 return len(self.index) - 1
516 return len(self.index) - 1
517
517
518 def rev(self, node):
518 def rev(self, node):
519 try:
519 try:
520 return self.nodemap[node]
520 return self.nodemap[node]
521 except KeyError:
521 except KeyError:
522 raise LookupError(hex(node), _('%s: no node %s') % (self.indexfile, hex(node)))
522 raise LookupError(node, self.indexfile, _('no node'))
523 def node(self, rev):
523 def node(self, rev):
524 return self.index[rev][7]
524 return self.index[rev][7]
525 def linkrev(self, node):
525 def linkrev(self, node):
526 return self.index[self.rev(node)][4]
526 return self.index[self.rev(node)][4]
527 def parents(self, node):
527 def parents(self, node):
528 d = self.index[self.rev(node)][5:7]
528 d = self.index[self.rev(node)][5:7]
529 return (self.node(d[0]), self.node(d[1]))
529 return (self.node(d[0]), self.node(d[1]))
530 def parentrevs(self, rev):
530 def parentrevs(self, rev):
531 return self.index[rev][5:7]
531 return self.index[rev][5:7]
532 def start(self, rev):
532 def start(self, rev):
533 return int(self.index[rev][0] >> 16)
533 return int(self.index[rev][0] >> 16)
534 def end(self, rev):
534 def end(self, rev):
535 return self.start(rev) + self.length(rev)
535 return self.start(rev) + self.length(rev)
536 def length(self, rev):
536 def length(self, rev):
537 return self.index[rev][1]
537 return self.index[rev][1]
538 def base(self, rev):
538 def base(self, rev):
539 return self.index[rev][3]
539 return self.index[rev][3]
540
540
541 def size(self, rev):
541 def size(self, rev):
542 """return the length of the uncompressed text for a given revision"""
542 """return the length of the uncompressed text for a given revision"""
543 l = self.index[rev][2]
543 l = self.index[rev][2]
544 if l >= 0:
544 if l >= 0:
545 return l
545 return l
546
546
547 t = self.revision(self.node(rev))
547 t = self.revision(self.node(rev))
548 return len(t)
548 return len(t)
549
549
550 # alternate implementation. The advantage of this code is that it
550 # alternate implementation. The advantage of this code is that it
551 # will be faster for a single revision. But the results are not
551 # will be faster for a single revision. But the results are not
552 # cached, so finding the size of every revision will be slower.
552 # cached, so finding the size of every revision will be slower.
553 """
553 """
554 if self.cache and self.cache[1] == rev:
554 if self.cache and self.cache[1] == rev:
555 return len(self.cache[2])
555 return len(self.cache[2])
556
556
557 base = self.base(rev)
557 base = self.base(rev)
558 if self.cache and self.cache[1] >= base and self.cache[1] < rev:
558 if self.cache and self.cache[1] >= base and self.cache[1] < rev:
559 base = self.cache[1]
559 base = self.cache[1]
560 text = self.cache[2]
560 text = self.cache[2]
561 else:
561 else:
562 text = self.revision(self.node(base))
562 text = self.revision(self.node(base))
563
563
564 l = len(text)
564 l = len(text)
565 for x in xrange(base + 1, rev + 1):
565 for x in xrange(base + 1, rev + 1):
566 l = mdiff.patchedsize(l, self.chunk(x))
566 l = mdiff.patchedsize(l, self.chunk(x))
567 return l
567 return l
568 """
568 """
569
569
570 def reachable(self, node, stop=None):
570 def reachable(self, node, stop=None):
571 """return a hash of all nodes ancestral to a given node, including
571 """return a hash of all nodes ancestral to a given node, including
572 the node itself, stopping when stop is matched"""
572 the node itself, stopping when stop is matched"""
573 reachable = {}
573 reachable = {}
574 visit = [node]
574 visit = [node]
575 reachable[node] = 1
575 reachable[node] = 1
576 if stop:
576 if stop:
577 stopn = self.rev(stop)
577 stopn = self.rev(stop)
578 else:
578 else:
579 stopn = 0
579 stopn = 0
580 while visit:
580 while visit:
581 n = visit.pop(0)
581 n = visit.pop(0)
582 if n == stop:
582 if n == stop:
583 continue
583 continue
584 if n == nullid:
584 if n == nullid:
585 continue
585 continue
586 for p in self.parents(n):
586 for p in self.parents(n):
587 if self.rev(p) < stopn:
587 if self.rev(p) < stopn:
588 continue
588 continue
589 if p not in reachable:
589 if p not in reachable:
590 reachable[p] = 1
590 reachable[p] = 1
591 visit.append(p)
591 visit.append(p)
592 return reachable
592 return reachable
593
593
594 def nodesbetween(self, roots=None, heads=None):
594 def nodesbetween(self, roots=None, heads=None):
595 """Return a tuple containing three elements. Elements 1 and 2 contain
595 """Return a tuple containing three elements. Elements 1 and 2 contain
596 the final lists of bases and heads after all the unreachable ones have been
596 the final lists of bases and heads after all the unreachable ones have been
597 pruned. Element 0 contains a topologically sorted list of all
597 pruned. Element 0 contains a topologically sorted list of all
598
598
599 nodes that satisfy these constraints:
599 nodes that satisfy these constraints:
600 1. All nodes must be descended from a node in roots (the nodes on
600 1. All nodes must be descended from a node in roots (the nodes on
601 roots are considered descended from themselves).
601 roots are considered descended from themselves).
602 2. All nodes must also be ancestors of a node in heads (the nodes in
602 2. All nodes must also be ancestors of a node in heads (the nodes in
603 heads are considered to be their own ancestors).
603 heads are considered to be their own ancestors).
604
604
605 If roots is unspecified, nullid is assumed as the only root.
605 If roots is unspecified, nullid is assumed as the only root.
606 If heads is unspecified, it is taken to be the output of the
606 If heads is unspecified, it is taken to be the output of the
607 heads method (i.e. a list of all nodes in the repository that
607 heads method (i.e. a list of all nodes in the repository that
608 have no children)."""
608 have no children)."""
609 nonodes = ([], [], [])
609 nonodes = ([], [], [])
610 if roots is not None:
610 if roots is not None:
611 roots = list(roots)
611 roots = list(roots)
612 if not roots:
612 if not roots:
613 return nonodes
613 return nonodes
614 lowestrev = min([self.rev(n) for n in roots])
614 lowestrev = min([self.rev(n) for n in roots])
615 else:
615 else:
616 roots = [nullid] # Everybody's a descendant of nullid
616 roots = [nullid] # Everybody's a descendant of nullid
617 lowestrev = nullrev
617 lowestrev = nullrev
618 if (lowestrev == nullrev) and (heads is None):
618 if (lowestrev == nullrev) and (heads is None):
619 # We want _all_ the nodes!
619 # We want _all_ the nodes!
620 return ([self.node(r) for r in xrange(0, self.count())],
620 return ([self.node(r) for r in xrange(0, self.count())],
621 [nullid], list(self.heads()))
621 [nullid], list(self.heads()))
622 if heads is None:
622 if heads is None:
623 # All nodes are ancestors, so the latest ancestor is the last
623 # All nodes are ancestors, so the latest ancestor is the last
624 # node.
624 # node.
625 highestrev = self.count() - 1
625 highestrev = self.count() - 1
626 # Set ancestors to None to signal that every node is an ancestor.
626 # Set ancestors to None to signal that every node is an ancestor.
627 ancestors = None
627 ancestors = None
628 # Set heads to an empty dictionary for later discovery of heads
628 # Set heads to an empty dictionary for later discovery of heads
629 heads = {}
629 heads = {}
630 else:
630 else:
631 heads = list(heads)
631 heads = list(heads)
632 if not heads:
632 if not heads:
633 return nonodes
633 return nonodes
634 ancestors = {}
634 ancestors = {}
635 # Turn heads into a dictionary so we can remove 'fake' heads.
635 # Turn heads into a dictionary so we can remove 'fake' heads.
636 # Also, later we will be using it to filter out the heads we can't
636 # Also, later we will be using it to filter out the heads we can't
637 # reach from roots.
637 # reach from roots.
638 heads = dict.fromkeys(heads, 0)
638 heads = dict.fromkeys(heads, 0)
639 # Start at the top and keep marking parents until we're done.
639 # Start at the top and keep marking parents until we're done.
640 nodestotag = heads.keys()
640 nodestotag = heads.keys()
641 # Remember where the top was so we can use it as a limit later.
641 # Remember where the top was so we can use it as a limit later.
642 highestrev = max([self.rev(n) for n in nodestotag])
642 highestrev = max([self.rev(n) for n in nodestotag])
643 while nodestotag:
643 while nodestotag:
644 # grab a node to tag
644 # grab a node to tag
645 n = nodestotag.pop()
645 n = nodestotag.pop()
646 # Never tag nullid
646 # Never tag nullid
647 if n == nullid:
647 if n == nullid:
648 continue
648 continue
649 # A node's revision number represents its place in a
649 # A node's revision number represents its place in a
650 # topologically sorted list of nodes.
650 # topologically sorted list of nodes.
651 r = self.rev(n)
651 r = self.rev(n)
652 if r >= lowestrev:
652 if r >= lowestrev:
653 if n not in ancestors:
653 if n not in ancestors:
654 # If we are possibly a descendent of one of the roots
654 # If we are possibly a descendent of one of the roots
655 # and we haven't already been marked as an ancestor
655 # and we haven't already been marked as an ancestor
656 ancestors[n] = 1 # Mark as ancestor
656 ancestors[n] = 1 # Mark as ancestor
657 # Add non-nullid parents to list of nodes to tag.
657 # Add non-nullid parents to list of nodes to tag.
658 nodestotag.extend([p for p in self.parents(n) if
658 nodestotag.extend([p for p in self.parents(n) if
659 p != nullid])
659 p != nullid])
660 elif n in heads: # We've seen it before, is it a fake head?
660 elif n in heads: # We've seen it before, is it a fake head?
661 # So it is; real heads should not be the ancestors of
661 # So it is; real heads should not be the ancestors of
662 # any other heads.
662 # any other heads.
663 heads.pop(n)
663 heads.pop(n)
664 if not ancestors:
664 if not ancestors:
665 return nonodes
665 return nonodes
666 # Now that we have our set of ancestors, we want to remove any
666 # Now that we have our set of ancestors, we want to remove any
667 # roots that are not ancestors.
667 # roots that are not ancestors.
668
668
669 # If one of the roots was nullid, everything is included anyway.
669 # If one of the roots was nullid, everything is included anyway.
670 if lowestrev > nullrev:
670 if lowestrev > nullrev:
671 # But, since we weren't, let's recompute the lowest rev to not
671 # But, since we weren't, let's recompute the lowest rev to not
672 # include roots that aren't ancestors.
672 # include roots that aren't ancestors.
673
673
674 # Filter out roots that aren't ancestors of heads
674 # Filter out roots that aren't ancestors of heads
675 roots = [n for n in roots if n in ancestors]
675 roots = [n for n in roots if n in ancestors]
676 # Recompute the lowest revision
676 # Recompute the lowest revision
677 if roots:
677 if roots:
678 lowestrev = min([self.rev(n) for n in roots])
678 lowestrev = min([self.rev(n) for n in roots])
679 else:
679 else:
680 # No more roots? Return empty list
680 # No more roots? Return empty list
681 return nonodes
681 return nonodes
682 else:
682 else:
683 # We are descending from nullid, and don't need to care about
683 # We are descending from nullid, and don't need to care about
684 # any other roots.
684 # any other roots.
685 lowestrev = nullrev
685 lowestrev = nullrev
686 roots = [nullid]
686 roots = [nullid]
687 # Transform our roots list into a 'set' (i.e. a dictionary where the
687 # Transform our roots list into a 'set' (i.e. a dictionary where the
688 # values don't matter).
688 # values don't matter).
689 descendents = dict.fromkeys(roots, 1)
689 descendents = dict.fromkeys(roots, 1)
690 # Also, keep the original roots so we can filter out roots that aren't
690 # Also, keep the original roots so we can filter out roots that aren't
691 # 'real' roots (i.e. are descended from other roots).
691 # 'real' roots (i.e. are descended from other roots).
692 roots = descendents.copy()
692 roots = descendents.copy()
693 # Our topologically sorted list of output nodes.
693 # Our topologically sorted list of output nodes.
694 orderedout = []
694 orderedout = []
695 # Don't start at nullid since we don't want nullid in our output list,
695 # Don't start at nullid since we don't want nullid in our output list,
696 # and if nullid shows up in descendents, empty parents will look like
696 # and if nullid shows up in descendents, empty parents will look like
697 # they're descendents.
697 # they're descendents.
698 for r in xrange(max(lowestrev, 0), highestrev + 1):
698 for r in xrange(max(lowestrev, 0), highestrev + 1):
699 n = self.node(r)
699 n = self.node(r)
700 isdescendent = False
700 isdescendent = False
701 if lowestrev == nullrev: # Everybody is a descendent of nullid
701 if lowestrev == nullrev: # Everybody is a descendent of nullid
702 isdescendent = True
702 isdescendent = True
703 elif n in descendents:
703 elif n in descendents:
704 # n is already a descendent
704 # n is already a descendent
705 isdescendent = True
705 isdescendent = True
706 # This check only needs to be done here because all the roots
706 # This check only needs to be done here because all the roots
707 # will start being marked as descendents before the loop.
707 # will start being marked as descendents before the loop.
708 if n in roots:
708 if n in roots:
709 # If n was a root, check if it's a 'real' root.
709 # If n was a root, check if it's a 'real' root.
710 p = tuple(self.parents(n))
710 p = tuple(self.parents(n))
711 # If any of its parents are descendents, it's not a root.
711 # If any of its parents are descendents, it's not a root.
712 if (p[0] in descendents) or (p[1] in descendents):
712 if (p[0] in descendents) or (p[1] in descendents):
713 roots.pop(n)
713 roots.pop(n)
714 else:
714 else:
715 p = tuple(self.parents(n))
715 p = tuple(self.parents(n))
716 # A node is a descendent if either of its parents is a
716 # A node is a descendent if either of its parents is a
717 # descendent. (We seeded the descendents dict with the roots
717 # descendent. (We seeded the descendents dict with the roots
718 # up there, remember?)
718 # up there, remember?)
719 if (p[0] in descendents) or (p[1] in descendents):
719 if (p[0] in descendents) or (p[1] in descendents):
720 descendents[n] = 1
720 descendents[n] = 1
721 isdescendent = True
721 isdescendent = True
722 if isdescendent and ((ancestors is None) or (n in ancestors)):
722 if isdescendent and ((ancestors is None) or (n in ancestors)):
723 # Only include nodes that are both descendents and ancestors.
723 # Only include nodes that are both descendents and ancestors.
724 orderedout.append(n)
724 orderedout.append(n)
725 if (ancestors is not None) and (n in heads):
725 if (ancestors is not None) and (n in heads):
726 # We're trying to figure out which heads are reachable
726 # We're trying to figure out which heads are reachable
727 # from roots.
727 # from roots.
728 # Mark this head as having been reached
728 # Mark this head as having been reached
729 heads[n] = 1
729 heads[n] = 1
730 elif ancestors is None:
730 elif ancestors is None:
731 # Otherwise, we're trying to discover the heads.
731 # Otherwise, we're trying to discover the heads.
732 # Assume this is a head because if it isn't, the next step
732 # Assume this is a head because if it isn't, the next step
733 # will eventually remove it.
733 # will eventually remove it.
734 heads[n] = 1
734 heads[n] = 1
735 # But, obviously its parents aren't.
735 # But, obviously its parents aren't.
736 for p in self.parents(n):
736 for p in self.parents(n):
737 heads.pop(p, None)
737 heads.pop(p, None)
738 heads = [n for n in heads.iterkeys() if heads[n] != 0]
738 heads = [n for n in heads.iterkeys() if heads[n] != 0]
739 roots = roots.keys()
739 roots = roots.keys()
740 assert orderedout
740 assert orderedout
741 assert roots
741 assert roots
742 assert heads
742 assert heads
743 return (orderedout, roots, heads)
743 return (orderedout, roots, heads)
744
744
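The pruning described in the docstring is easier to verify on a toy DAG. A minimal sketch of the same roots/heads filtering, assuming an invented five-node graph rather than a real revlog index:

parents = {0: [], 1: [0], 2: [1], 3: [2], 4: [1]}   # toy DAG, rev 4 branches off 1

def between(roots, heads):
    # descendants of any root (roots count as their own descendants)
    desc = set(roots)
    for n in sorted(parents):                  # toy numbering is topological
        if any(p in desc for p in parents[n]):
            desc.add(n)
    # ancestors of any head (heads count as their own ancestors)
    anc, stack = set(), list(heads)
    while stack:
        n = stack.pop()
        if n not in anc:
            anc.add(n)
            stack.extend(parents[n])
    keep = sorted(desc & anc)                  # topologically sorted survivors
    return keep, [n for n in keep if n in roots], [n for n in keep if n in heads]

assert between([1], [3]) == ([1, 2, 3], [1], [3])   # rev 4 is pruned away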
745 def heads(self, start=None, stop=None):
745 def heads(self, start=None, stop=None):
746 """return the list of all nodes that have no children
746 """return the list of all nodes that have no children
747
747
748 If start is specified, only heads that are descendants of
748 If start is specified, only heads that are descendants of
749 start will be returned.
749 start will be returned.
750 If stop is specified, the revs in stop are treated as if
750 If stop is specified, the revs in stop are treated as if
751 they had no children.
751 they had no children.
752 """
752 """
753 if start is None and stop is None:
753 if start is None and stop is None:
754 count = self.count()
754 count = self.count()
755 if not count:
755 if not count:
756 return [nullid]
756 return [nullid]
757 ishead = [1] * (count + 1)
757 ishead = [1] * (count + 1)
758 index = self.index
758 index = self.index
759 for r in xrange(count):
759 for r in xrange(count):
760 e = index[r]
760 e = index[r]
761 ishead[e[5]] = ishead[e[6]] = 0
761 ishead[e[5]] = ishead[e[6]] = 0
762 return [self.node(r) for r in xrange(count) if ishead[r]]
762 return [self.node(r) for r in xrange(count) if ishead[r]]
763
763
764 if start is None:
764 if start is None:
765 start = nullid
765 start = nullid
766 if stop is None:
766 if stop is None:
767 stop = []
767 stop = []
768 stoprevs = dict.fromkeys([self.rev(n) for n in stop])
768 stoprevs = dict.fromkeys([self.rev(n) for n in stop])
769 startrev = self.rev(start)
769 startrev = self.rev(start)
770 reachable = {startrev: 1}
770 reachable = {startrev: 1}
771 heads = {startrev: 1}
771 heads = {startrev: 1}
772
772
773 parentrevs = self.parentrevs
773 parentrevs = self.parentrevs
774 for r in xrange(startrev + 1, self.count()):
774 for r in xrange(startrev + 1, self.count()):
775 for p in parentrevs(r):
775 for p in parentrevs(r):
776 if p in reachable:
776 if p in reachable:
777 if r not in stoprevs:
777 if r not in stoprevs:
778 reachable[r] = 1
778 reachable[r] = 1
779 heads[r] = 1
779 heads[r] = 1
780 if p in heads and p not in stoprevs:
780 if p in heads and p not in stoprevs:
781 del heads[p]
781 del heads[p]
782
782
783 return [self.node(r) for r in heads]
783 return [self.node(r) for r in heads]
784
784
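The no-argument fast path above assumes every rev is a head and clears the flag for anything that appears as a parent; the list is sized count + 1 so writes for nullrev (-1) land in a scratch slot. A sketch over an invented parent table:

parentrevs = [(-1, -1), (0, -1), (1, -1), (1, -1)]   # toy: revs 2, 3 branch off 1

ishead = [1] * (len(parentrevs) + 1)   # extra slot absorbs writes for rev -1
for p1, p2 in parentrevs:
    ishead[p1] = ishead[p2] = 0        # a parent can never be a head
assert [r for r in range(len(parentrevs)) if ishead[r]] == [2, 3]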
785 def children(self, node):
785 def children(self, node):
786 """find the children of a given node"""
786 """find the children of a given node"""
787 c = []
787 c = []
788 p = self.rev(node)
788 p = self.rev(node)
789 for r in range(p + 1, self.count()):
789 for r in range(p + 1, self.count()):
790 prevs = [pr for pr in self.parentrevs(r) if pr != nullrev]
790 prevs = [pr for pr in self.parentrevs(r) if pr != nullrev]
791 if prevs:
791 if prevs:
792 for pr in prevs:
792 for pr in prevs:
793 if pr == p:
793 if pr == p:
794 c.append(self.node(r))
794 c.append(self.node(r))
795 elif p == nullrev:
795 elif p == nullrev:
796 c.append(self.node(r))
796 c.append(self.node(r))
797 return c
797 return c
798
798
799 def _match(self, id):
799 def _match(self, id):
800 if isinstance(id, (long, int)):
800 if isinstance(id, (long, int)):
801 # rev
801 # rev
802 return self.node(id)
802 return self.node(id)
803 if len(id) == 20:
803 if len(id) == 20:
804 # possibly a binary node
804 # possibly a binary node
805 # odds of a binary node being all hex in ASCII are 1 in 10**25
805 # odds of a binary node being all hex in ASCII are 1 in 10**25
806 try:
806 try:
807 node = id
807 node = id
808 r = self.rev(node) # quick search the index
808 r = self.rev(node) # quick search the index
809 return node
809 return node
810 except LookupError:
810 except LookupError:
811 pass # may be partial hex id
811 pass # may be partial hex id
812 try:
812 try:
813 # str(rev)
813 # str(rev)
814 rev = int(id)
814 rev = int(id)
815 if str(rev) != id:
815 if str(rev) != id:
816 raise ValueError
816 raise ValueError
817 if rev < 0:
817 if rev < 0:
818 rev = self.count() + rev
818 rev = self.count() + rev
819 if rev < 0 or rev >= self.count():
819 if rev < 0 or rev >= self.count():
820 raise ValueError
820 raise ValueError
821 return self.node(rev)
821 return self.node(rev)
822 except (ValueError, OverflowError):
822 except (ValueError, OverflowError):
823 pass
823 pass
824 if len(id) == 40:
824 if len(id) == 40:
825 try:
825 try:
826 # a full hex nodeid?
826 # a full hex nodeid?
827 node = bin(id)
827 node = bin(id)
828 r = self.rev(node)
828 r = self.rev(node)
829 return node
829 return node
830 except TypeError:
830 except TypeError:
831 pass
831 pass
832
832
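The str(rev) branch above is deliberately strict: the round trip str(int(id)) == id rejects spellings like '03' or ' 3', and negative revs index from the end. A sketch of that branch in isolation (the helper name is invented):

def as_rev(id_str, count):
    rev = int(id_str)                  # may raise ValueError, as in _match
    if str(rev) != id_str:             # reject '03', ' 3', '+3', ...
        raise ValueError(id_str)
    if rev < 0:
        rev += count                   # negative revs count from the tip
    if not 0 <= rev < count:
        raise ValueError(id_str)
    return rev

assert as_rev('-1', 10) == 9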
833 def _partialmatch(self, id):
833 def _partialmatch(self, id):
834 if len(id) < 40:
834 if len(id) < 40:
835 try:
835 try:
836 # hex(node)[:...]
836 # hex(node)[:...]
837 bin_id = bin(id[:len(id) & ~1]) # grab an even number of digits
837 bin_id = bin(id[:len(id) & ~1]) # grab an even number of digits
838 node = None
838 node = None
839 for n in self.nodemap:
839 for n in self.nodemap:
840 if n.startswith(bin_id) and hex(n).startswith(id):
840 if n.startswith(bin_id) and hex(n).startswith(id):
841 if node is not None:
841 if node is not None:
842 raise LookupError(hex(node),
842 raise LookupError(id, self.indexfile,
843 _("Ambiguous identifier"))
843 _('ambiguous identifier'))
844 node = n
844 node = n
845 if node is not None:
845 if node is not None:
846 return node
846 return node
847 except TypeError:
847 except TypeError:
848 pass
848 pass
849
849
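The len(id) & ~1 mask keeps only whole bytes of the hex prefix, so an odd-length query still narrows the nodemap scan before the exact hex comparison. A self-contained sketch (the padded node id is invented):

from binascii import hexlify, unhexlify

def prefix_matches(query, node):
    # whole-byte comparison first, then the full (possibly odd-length) prefix
    whole = unhexlify(query[:len(query) & ~1])
    return node.startswith(whole) and hexlify(node).decode().startswith(query)

node = unhexlify('905359268f77' + '0' * 28)    # invented 20-byte node id
assert prefix_matches('905', node)             # odd-length prefixes work
assert not prefix_matches('906', node)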
850 def lookup(self, id):
850 def lookup(self, id):
851 """locate a node based on:
851 """locate a node based on:
852 - revision number or str(revision number)
852 - revision number or str(revision number)
853 - nodeid or subset of hex nodeid
853 - nodeid or subset of hex nodeid
854 """
854 """
855 n = self._match(id)
855 n = self._match(id)
856 if n is not None:
856 if n is not None:
857 return n
857 return n
858 n = self._partialmatch(id)
858 n = self._partialmatch(id)
859 if n:
859 if n:
860 return n
860 return n
861
861
862 raise LookupError(id, _("No match found"))
862 raise LookupError(id, self.indexfile, _('no match found'))
863
863
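This raise is the heart of the change: LookupError now carries the offending id and the revlog's file name, so the eventual abort can say exactly where the lookup failed (compare the updated test expectations further down, e.g. 00changelog.i@ac69c658229d: unknown parent!). A stand-in class sketching the intended rendering; the real exception is Mercurial's own and may store its arguments differently:

class RevlogLookupError(Exception):            # stand-in, not Mercurial's class
    def __init__(self, name, index, message):
        self.name, self.index, self.message = name, index, message
        Exception.__init__(self, '%s@%s: %s' % (index, name, message))

err = RevlogLookupError('ac69c658229d', '00changelog.i', 'unknown parent')
assert str(err) == '00changelog.i@ac69c658229d: unknown parent'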
864 def cmp(self, node, text):
864 def cmp(self, node, text):
865 """compare text with a given file revision"""
865 """compare text with a given file revision"""
866 p1, p2 = self.parents(node)
866 p1, p2 = self.parents(node)
867 return hash(text, p1, p2) != node
867 return hash(text, p1, p2) != node
868
868
869 def chunk(self, rev, df=None):
869 def chunk(self, rev, df=None):
870 def loadcache(df):
870 def loadcache(df):
871 if not df:
871 if not df:
872 if self._inline:
872 if self._inline:
873 df = self.opener(self.indexfile)
873 df = self.opener(self.indexfile)
874 else:
874 else:
875 df = self.opener(self.datafile)
875 df = self.opener(self.datafile)
876 df.seek(start)
876 df.seek(start)
877 self._chunkcache = (start, df.read(cache_length))
877 self._chunkcache = (start, df.read(cache_length))
878
878
879 start, length = self.start(rev), self.length(rev)
879 start, length = self.start(rev), self.length(rev)
880 if self._inline:
880 if self._inline:
881 start += (rev + 1) * self._io.size
881 start += (rev + 1) * self._io.size
882 end = start + length
882 end = start + length
883
883
884 offset = 0
884 offset = 0
885 if not self._chunkcache:
885 if not self._chunkcache:
886 cache_length = max(65536, length)
886 cache_length = max(65536, length)
887 loadcache(df)
887 loadcache(df)
888 else:
888 else:
889 cache_start = self._chunkcache[0]
889 cache_start = self._chunkcache[0]
890 cache_length = len(self._chunkcache[1])
890 cache_length = len(self._chunkcache[1])
891 cache_end = cache_start + cache_length
891 cache_end = cache_start + cache_length
892 if start >= cache_start and end <= cache_end:
892 if start >= cache_start and end <= cache_end:
893 # it is cached
893 # it is cached
894 offset = start - cache_start
894 offset = start - cache_start
895 else:
895 else:
896 cache_length = max(65536, length)
896 cache_length = max(65536, length)
897 loadcache(df)
897 loadcache(df)
898
898
899 # avoid copying large chunks
899 # avoid copying large chunks
900 c = self._chunkcache[1]
900 c = self._chunkcache[1]
901 if cache_length != length:
901 if cache_length != length:
902 c = c[offset:offset + length]
902 c = c[offset:offset + length]
903
903
904 return decompress(c)
904 return decompress(c)
905
905
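The chunk cache is a (start, bytes) window over the data file: a read is served from it only when the whole [start, end) range falls inside the window; otherwise at least 64 kB is re-read around the miss. A sketch with the file replaced by an in-memory string of invented data:

data = bytes(bytearray(range(256))) * 16       # stand-in for the data file
cache = [None]                                  # (cache_start, cached_bytes)

def read_chunk(start, length):
    end = start + length
    if cache[0]:
        cstart, cdata = cache[0]
        if start >= cstart and end <= cstart + len(cdata):
            return cdata[start - cstart:start - cstart + length]   # cache hit
    cache[0] = (start, data[start:start + max(65536, length)])     # refill
    return cache[0][1][:length]

assert read_chunk(10, 5) == data[10:15]        # miss fills the window
assert read_chunk(12, 3) == data[12:15]        # served from the same window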
906 def delta(self, node):
906 def delta(self, node):
907 """return or calculate a delta between a node and its predecessor"""
907 """return or calculate a delta between a node and its predecessor"""
908 r = self.rev(node)
908 r = self.rev(node)
909 return self.revdiff(r - 1, r)
909 return self.revdiff(r - 1, r)
910
910
911 def revdiff(self, rev1, rev2):
911 def revdiff(self, rev1, rev2):
912 """return or calculate a delta between two revisions"""
912 """return or calculate a delta between two revisions"""
913 if rev1 + 1 == rev2 and self.base(rev1) == self.base(rev2):
913 if rev1 + 1 == rev2 and self.base(rev1) == self.base(rev2):
914 return self.chunk(rev2)
914 return self.chunk(rev2)
915
915
916 return mdiff.textdiff(self.revision(self.node(rev1)),
916 return mdiff.textdiff(self.revision(self.node(rev1)),
917 self.revision(self.node(rev2)))
917 self.revision(self.node(rev2)))
918
918
919 def revision(self, node):
919 def revision(self, node):
920 """return an uncompressed revision of a given"""
920 """return an uncompressed revision of a given"""
921 if node == nullid:
921 if node == nullid:
922 return ""
922 return ""
923 if self._cache and self._cache[0] == node:
923 if self._cache and self._cache[0] == node:
924 return str(self._cache[2])
924 return str(self._cache[2])
925
925
926 # look up what we need to read
926 # look up what we need to read
927 text = None
927 text = None
928 rev = self.rev(node)
928 rev = self.rev(node)
929 base = self.base(rev)
929 base = self.base(rev)
930
930
931 # check rev flags
931 # check rev flags
932 if self.index[rev][0] & 0xFFFF:
932 if self.index[rev][0] & 0xFFFF:
933 raise RevlogError(_('incompatible revision flag %x') %
933 raise RevlogError(_('incompatible revision flag %x') %
934 (self.index[rev][0] & 0xFFFF))
934 (self.index[rev][0] & 0xFFFF))
935
935
936 df = None
936 df = None
937
937
938 # do we have useful data cached?
938 # do we have useful data cached?
939 if self._cache and self._cache[1] >= base and self._cache[1] < rev:
939 if self._cache and self._cache[1] >= base and self._cache[1] < rev:
940 base = self._cache[1]
940 base = self._cache[1]
941 text = str(self._cache[2])
941 text = str(self._cache[2])
942 self._loadindex(base, rev + 1)
942 self._loadindex(base, rev + 1)
943 if not self._inline and rev > base + 1:
943 if not self._inline and rev > base + 1:
944 df = self.opener(self.datafile)
944 df = self.opener(self.datafile)
945 else:
945 else:
946 self._loadindex(base, rev + 1)
946 self._loadindex(base, rev + 1)
947 if not self._inline and rev > base:
947 if not self._inline and rev > base:
948 df = self.opener(self.datafile)
948 df = self.opener(self.datafile)
949 text = self.chunk(base, df=df)
949 text = self.chunk(base, df=df)
950
950
951 bins = [self.chunk(r, df) for r in xrange(base + 1, rev + 1)]
951 bins = [self.chunk(r, df) for r in xrange(base + 1, rev + 1)]
952 text = mdiff.patches(text, bins)
952 text = mdiff.patches(text, bins)
953 p1, p2 = self.parents(node)
953 p1, p2 = self.parents(node)
954 if node != hash(text, p1, p2):
954 if node != hash(text, p1, p2):
955 raise RevlogError(_("integrity check failed on %s:%d")
955 raise RevlogError(_("integrity check failed on %s:%d")
956 % (self.datafile, rev))
956 % (self.datafile, rev))
957
957
958 self._cache = (node, rev, text)
958 self._cache = (node, rev, text)
959 return text
959 return text
960
960
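Reconstruction ends with an integrity check: the patched text must hash back to the node, where the node hash is sha1 over the two parents in sorted order followed by the text. A sketch with invented null-parent data:

import hashlib

def node_hash(text, p1, p2):
    a, b = sorted([p1, p2])                    # parent order doesn't matter
    return hashlib.sha1(a + b + text).digest()

p1 = p2 = b'\x00' * 20                         # null parents (toy revision)
text = b'hello revlog\n'
node = node_hash(text, p1, p2)
assert node_hash(text, p1, p2) == node         # integrity check passes
assert node_hash(text + b'!', p1, p2) != node  # corruption is caught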
961 def checkinlinesize(self, tr, fp=None):
961 def checkinlinesize(self, tr, fp=None):
962 if not self._inline:
962 if not self._inline:
963 return
963 return
964 if not fp:
964 if not fp:
965 fp = self.opener(self.indexfile, 'r')
965 fp = self.opener(self.indexfile, 'r')
966 fp.seek(0, 2)
966 fp.seek(0, 2)
967 size = fp.tell()
967 size = fp.tell()
968 if size < 131072:
968 if size < 131072:
969 return
969 return
970 trinfo = tr.find(self.indexfile)
970 trinfo = tr.find(self.indexfile)
971 if trinfo is None:
971 if trinfo is None:
972 raise RevlogError(_("%s not found in the transaction")
972 raise RevlogError(_("%s not found in the transaction")
973 % self.indexfile)
973 % self.indexfile)
974
974
975 trindex = trinfo[2]
975 trindex = trinfo[2]
976 dataoff = self.start(trindex)
976 dataoff = self.start(trindex)
977
977
978 tr.add(self.datafile, dataoff)
978 tr.add(self.datafile, dataoff)
979 df = self.opener(self.datafile, 'w')
979 df = self.opener(self.datafile, 'w')
980 calc = self._io.size
980 calc = self._io.size
981 for r in xrange(self.count()):
981 for r in xrange(self.count()):
982 start = self.start(r) + (r + 1) * calc
982 start = self.start(r) + (r + 1) * calc
983 length = self.length(r)
983 length = self.length(r)
984 fp.seek(start)
984 fp.seek(start)
985 d = fp.read(length)
985 d = fp.read(length)
986 df.write(d)
986 df.write(d)
987 fp.close()
987 fp.close()
988 df.close()
988 df.close()
989 fp = self.opener(self.indexfile, 'w', atomictemp=True)
989 fp = self.opener(self.indexfile, 'w', atomictemp=True)
990 self.version &= ~(REVLOGNGINLINEDATA)
990 self.version &= ~(REVLOGNGINLINEDATA)
991 self._inline = False
991 self._inline = False
992 for i in xrange(self.count()):
992 for i in xrange(self.count()):
993 e = self._io.packentry(self.index[i], self.node, self.version, i)
993 e = self._io.packentry(self.index[i], self.node, self.version, i)
994 fp.write(e)
994 fp.write(e)
995
995
996 # if we don't call rename, the temp file will never replace the
996 # if we don't call rename, the temp file will never replace the
997 # real index
997 # real index
998 fp.rename()
998 fp.rename()
999
999
1000 tr.replace(self.indexfile, trindex * calc)
1000 tr.replace(self.indexfile, trindex * calc)
1001 self._chunkcache = None
1001 self._chunkcache = None
1002
1002
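The migration loop relies on the inline layout: revision r's data sits after index entries 0 through r, i.e. at start(r) + (r + 1) * entrysize within the index file. A one-line sketch of that arithmetic with invented sizes:

def inline_data_offset(start_r, r, entrysize):
    # r's data follows the r + 1 index entries that precede it
    return start_r + (r + 1) * entrysize

assert inline_data_offset(0, 0, 64) == 64      # rev 0's data follows entry 0
assert inline_data_offset(30, 1, 64) == 158    # 30 data bytes plus 2 entries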
1003 def addrevision(self, text, transaction, link, p1, p2, d=None):
1003 def addrevision(self, text, transaction, link, p1, p2, d=None):
1004 """add a revision to the log
1004 """add a revision to the log
1005
1005
1006 text - the revision data to add
1006 text - the revision data to add
1007 transaction - the transaction object used for rollback
1007 transaction - the transaction object used for rollback
1008 link - the linkrev data to add
1008 link - the linkrev data to add
1009 p1, p2 - the parent nodeids of the revision
1009 p1, p2 - the parent nodeids of the revision
1010 d - an optional precomputed delta
1010 d - an optional precomputed delta
1011 """
1011 """
1012 dfh = None
1012 dfh = None
1013 if not self._inline:
1013 if not self._inline:
1014 dfh = self.opener(self.datafile, "a")
1014 dfh = self.opener(self.datafile, "a")
1015 ifh = self.opener(self.indexfile, "a+")
1015 ifh = self.opener(self.indexfile, "a+")
1016 return self._addrevision(text, transaction, link, p1, p2, d, ifh, dfh)
1016 return self._addrevision(text, transaction, link, p1, p2, d, ifh, dfh)
1017
1017
1018 def _addrevision(self, text, transaction, link, p1, p2, d, ifh, dfh):
1018 def _addrevision(self, text, transaction, link, p1, p2, d, ifh, dfh):
1019 node = hash(text, p1, p2)
1019 node = hash(text, p1, p2)
1020 if node in self.nodemap:
1020 if node in self.nodemap:
1021 return node
1021 return node
1022
1022
1023 curr = self.count()
1023 curr = self.count()
1024 prev = curr - 1
1024 prev = curr - 1
1025 base = self.base(prev)
1025 base = self.base(prev)
1026 offset = self.end(prev)
1026 offset = self.end(prev)
1027
1027
1028 if curr:
1028 if curr:
1029 if not d:
1029 if not d:
1030 ptext = self.revision(self.node(prev))
1030 ptext = self.revision(self.node(prev))
1031 d = mdiff.textdiff(ptext, text)
1031 d = mdiff.textdiff(ptext, text)
1032 data = compress(d)
1032 data = compress(d)
1033 l = len(data[1]) + len(data[0])
1033 l = len(data[1]) + len(data[0])
1034 dist = l + offset - self.start(base)
1034 dist = l + offset - self.start(base)
1035
1035
1036 # full versions are inserted when the needed deltas
1036 # full versions are inserted when the needed deltas
1037 # become comparable to the uncompressed text
1037 # become comparable to the uncompressed text
1038 if not curr or dist > len(text) * 2:
1038 if not curr or dist > len(text) * 2:
1039 data = compress(text)
1039 data = compress(text)
1040 l = len(data[1]) + len(data[0])
1040 l = len(data[1]) + len(data[0])
1041 base = curr
1041 base = curr
1042
1042
1043 e = (offset_type(offset, 0), l, len(text),
1043 e = (offset_type(offset, 0), l, len(text),
1044 base, link, self.rev(p1), self.rev(p2), node)
1044 base, link, self.rev(p1), self.rev(p2), node)
1045 self.index.insert(-1, e)
1045 self.index.insert(-1, e)
1046 self.nodemap[node] = curr
1046 self.nodemap[node] = curr
1047
1047
1048 entry = self._io.packentry(e, self.node, self.version, curr)
1048 entry = self._io.packentry(e, self.node, self.version, curr)
1049 if not self._inline:
1049 if not self._inline:
1050 transaction.add(self.datafile, offset)
1050 transaction.add(self.datafile, offset)
1051 transaction.add(self.indexfile, curr * len(entry))
1051 transaction.add(self.indexfile, curr * len(entry))
1052 if data[0]:
1052 if data[0]:
1053 dfh.write(data[0])
1053 dfh.write(data[0])
1054 dfh.write(data[1])
1054 dfh.write(data[1])
1055 dfh.flush()
1055 dfh.flush()
1056 ifh.write(entry)
1056 ifh.write(entry)
1057 else:
1057 else:
1058 offset += curr * self._io.size
1058 offset += curr * self._io.size
1059 transaction.add(self.indexfile, offset, curr)
1059 transaction.add(self.indexfile, offset, curr)
1060 ifh.write(entry)
1060 ifh.write(entry)
1061 ifh.write(data[0])
1061 ifh.write(data[0])
1062 ifh.write(data[1])
1062 ifh.write(data[1])
1063 self.checkinlinesize(transaction, ifh)
1063 self.checkinlinesize(transaction, ifh)
1064
1064
1065 self._cache = (node, curr, text)
1065 self._cache = (node, curr, text)
1066 return node
1066 return node
1067
1067
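The key decision in _addrevision is when to break the delta chain: a full version is stored for the first rev, or once the compressed span since the last base (dist) exceeds twice the new text. A sketch of just that predicate, with invented sizes:

def store_full_version(dist, textlen, first_rev):
    # mirrors: if not curr or dist > len(text) * 2
    return first_rev or dist > 2 * textlen

assert store_full_version(0, 100, first_rev=True)
assert store_full_version(250, 100, first_rev=False)       # chain too long
assert not store_full_version(150, 100, first_rev=False)   # keep the delta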
1068 def ancestor(self, a, b):
1068 def ancestor(self, a, b):
1069 """calculate the least common ancestor of nodes a and b"""
1069 """calculate the least common ancestor of nodes a and b"""
1070
1070
1071 def parents(rev):
1071 def parents(rev):
1072 return [p for p in self.parentrevs(rev) if p != nullrev]
1072 return [p for p in self.parentrevs(rev) if p != nullrev]
1073
1073
1074 c = ancestor.ancestor(self.rev(a), self.rev(b), parents)
1074 c = ancestor.ancestor(self.rev(a), self.rev(b), parents)
1075 if c is None:
1075 if c is None:
1076 return nullid
1076 return nullid
1077
1077
1078 return self.node(c)
1078 return self.node(c)
1079
1079
1080 def group(self, nodelist, lookup, infocollect=None):
1080 def group(self, nodelist, lookup, infocollect=None):
1081 """calculate a delta group
1081 """calculate a delta group
1082
1082
1083 Given a list of changeset revs, return a set of deltas and
1083 Given a list of changeset revs, return a set of deltas and
1084 metadata corresponding to nodes. The first delta is
1084 metadata corresponding to nodes. The first delta is
1085 parent(nodes[0]) -> nodes[0]; the receiver is guaranteed to
1085 parent(nodes[0]) -> nodes[0]; the receiver is guaranteed to
1086 have this parent as it has all history before these
1086 have this parent as it has all history before these
1087 changesets. The parent used is parents[0].
1087 changesets. The parent used is parents[0].
1088 """
1088 """
1089 revs = [self.rev(n) for n in nodelist]
1089 revs = [self.rev(n) for n in nodelist]
1090
1090
1091 # if we don't have any revisions touched by these changesets, bail
1091 # if we don't have any revisions touched by these changesets, bail
1092 if not revs:
1092 if not revs:
1093 yield changegroup.closechunk()
1093 yield changegroup.closechunk()
1094 return
1094 return
1095
1095
1096 # add the parent of the first rev
1096 # add the parent of the first rev
1097 p = self.parents(self.node(revs[0]))[0]
1097 p = self.parents(self.node(revs[0]))[0]
1098 revs.insert(0, self.rev(p))
1098 revs.insert(0, self.rev(p))
1099
1099
1100 # build deltas
1100 # build deltas
1101 for d in xrange(0, len(revs) - 1):
1101 for d in xrange(0, len(revs) - 1):
1102 a, b = revs[d], revs[d + 1]
1102 a, b = revs[d], revs[d + 1]
1103 nb = self.node(b)
1103 nb = self.node(b)
1104
1104
1105 if infocollect is not None:
1105 if infocollect is not None:
1106 infocollect(nb)
1106 infocollect(nb)
1107
1107
1108 p = self.parents(nb)
1108 p = self.parents(nb)
1109 meta = nb + p[0] + p[1] + lookup(nb)
1109 meta = nb + p[0] + p[1] + lookup(nb)
1110 if a == -1:
1110 if a == -1:
1111 d = self.revision(nb)
1111 d = self.revision(nb)
1112 meta += mdiff.trivialdiffheader(len(d))
1112 meta += mdiff.trivialdiffheader(len(d))
1113 else:
1113 else:
1114 d = self.revdiff(a, b)
1114 d = self.revdiff(a, b)
1115 yield changegroup.chunkheader(len(meta) + len(d))
1115 yield changegroup.chunkheader(len(meta) + len(d))
1116 yield meta
1116 yield meta
1117 if len(d) > 2**20:
1117 if len(d) > 2**20:
1118 pos = 0
1118 pos = 0
1119 while pos < len(d):
1119 while pos < len(d):
1120 pos2 = pos + 2 ** 18
1120 pos2 = pos + 2 ** 18
1121 yield d[pos:pos2]
1121 yield d[pos:pos2]
1122 pos = pos2
1122 pos = pos2
1123 else:
1123 else:
1124 yield d
1124 yield d
1125
1125
1126 yield changegroup.closechunk()
1126 yield changegroup.closechunk()
1127
1127
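Each group entry is a length header, 80 bytes of metadata (node, both parents, and the changeset returned by lookup), then the delta, and deltas over 1 MB are streamed in 256 kB slices. A sketch of the framing, assuming a plain 4-byte big-endian length where the real code uses changegroup.chunkheader:

import struct

def frame(meta, delta):
    yield struct.pack('>I', len(meta) + len(delta))    # assumed header format
    yield meta
    if len(delta) > 2 ** 20:                           # stream large deltas
        for pos in range(0, len(delta), 2 ** 18):
            yield delta[pos:pos + 2 ** 18]
    else:
        yield delta

chunks = list(frame(b'M' * 80, b'D' * (2 ** 20 + 1)))
assert len(chunks) == 2 + 5                    # header, meta, 5 delta slices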
1128 def addgroup(self, revs, linkmapper, transaction, unique=0):
1128 def addgroup(self, revs, linkmapper, transaction, unique=0):
1129 """
1129 """
1130 add a delta group
1130 add a delta group
1131
1131
1132 Given a set of deltas, add them to the revision log. The
1132 Given a set of deltas, add them to the revision log. The
1133 first delta is against its parent, which should be in our
1133 first delta is against its parent, which should be in our
1134 log; the rest are against the previous delta.
1134 log; the rest are against the previous delta.
1135 """
1135 """
1136
1136
1137 # track the base of the current delta log
1137 # track the base of the current delta log
1138 r = self.count()
1138 r = self.count()
1139 t = r - 1
1139 t = r - 1
1140 node = None
1140 node = None
1141
1141
1142 base = prev = nullrev
1142 base = prev = nullrev
1143 start = end = textlen = 0
1143 start = end = textlen = 0
1144 if r:
1144 if r:
1145 end = self.end(t)
1145 end = self.end(t)
1146
1146
1147 ifh = self.opener(self.indexfile, "a+")
1147 ifh = self.opener(self.indexfile, "a+")
1148 isize = r * self._io.size
1148 isize = r * self._io.size
1149 if self._inline:
1149 if self._inline:
1150 transaction.add(self.indexfile, end + isize, r)
1150 transaction.add(self.indexfile, end + isize, r)
1151 dfh = None
1151 dfh = None
1152 else:
1152 else:
1153 transaction.add(self.indexfile, isize, r)
1153 transaction.add(self.indexfile, isize, r)
1154 transaction.add(self.datafile, end)
1154 transaction.add(self.datafile, end)
1155 dfh = self.opener(self.datafile, "a")
1155 dfh = self.opener(self.datafile, "a")
1156
1156
1157 # loop through our set of deltas
1157 # loop through our set of deltas
1158 chain = None
1158 chain = None
1159 for chunk in revs:
1159 for chunk in revs:
1160 node, p1, p2, cs = struct.unpack("20s20s20s20s", chunk[:80])
1160 node, p1, p2, cs = struct.unpack("20s20s20s20s", chunk[:80])
1161 link = linkmapper(cs)
1161 link = linkmapper(cs)
1162 if node in self.nodemap:
1162 if node in self.nodemap:
1163 # this can happen if two branches make the same change
1163 # this can happen if two branches make the same change
1164 # if unique:
1164 # if unique:
1165 # raise RevlogError(_("already have %s") % hex(node[:4]))
1165 # raise RevlogError(_("already have %s") % hex(node[:4]))
1166 chain = node
1166 chain = node
1167 continue
1167 continue
1168 delta = buffer(chunk, 80)
1168 delta = buffer(chunk, 80)
1169 del chunk
1169 del chunk
1170
1170
1171 for p in (p1, p2):
1171 for p in (p1, p2):
1172 if not p in self.nodemap:
1172 if not p in self.nodemap:
1173 raise LookupError(hex(p), _("unknown parent %s") % short(p))
1173 raise LookupError(p, self.indexfile, _('unknown parent'))
1174
1174
1175 if not chain:
1175 if not chain:
1176 # retrieve the parent revision of the delta chain
1176 # retrieve the parent revision of the delta chain
1177 chain = p1
1177 chain = p1
1178 if not chain in self.nodemap:
1178 if not chain in self.nodemap:
1179 raise LookupError(hex(chain), _("unknown base %s") % short(chain[:4]))
1179 raise LookupError(chain, self.indexfile, _('unknown base'))
1180
1180
1181 # full versions are inserted when the needed deltas become
1181 # full versions are inserted when the needed deltas become
1182 # comparable to the uncompressed text or when the previous
1182 # comparable to the uncompressed text or when the previous
1183 # version is not the one we have a delta against. We use
1183 # version is not the one we have a delta against. We use
1184 # the size of the previous full rev as a proxy for the
1184 # the size of the previous full rev as a proxy for the
1185 # current size.
1185 # current size.
1186
1186
1187 if chain == prev:
1187 if chain == prev:
1188 cdelta = compress(delta)
1188 cdelta = compress(delta)
1189 cdeltalen = len(cdelta[0]) + len(cdelta[1])
1189 cdeltalen = len(cdelta[0]) + len(cdelta[1])
1190 textlen = mdiff.patchedsize(textlen, delta)
1190 textlen = mdiff.patchedsize(textlen, delta)
1191
1191
1192 if chain != prev or (end - start + cdeltalen) > textlen * 2:
1192 if chain != prev or (end - start + cdeltalen) > textlen * 2:
1193 # flush our writes here so we can read it in revision
1193 # flush our writes here so we can read it in revision
1194 if dfh:
1194 if dfh:
1195 dfh.flush()
1195 dfh.flush()
1196 ifh.flush()
1196 ifh.flush()
1197 text = self.revision(chain)
1197 text = self.revision(chain)
1198 if len(text) == 0:
1198 if len(text) == 0:
1199 # skip over trivial delta header
1199 # skip over trivial delta header
1200 text = buffer(delta, 12)
1200 text = buffer(delta, 12)
1201 else:
1201 else:
1202 text = mdiff.patches(text, [delta])
1202 text = mdiff.patches(text, [delta])
1203 del delta
1203 del delta
1204 chk = self._addrevision(text, transaction, link, p1, p2, None,
1204 chk = self._addrevision(text, transaction, link, p1, p2, None,
1205 ifh, dfh)
1205 ifh, dfh)
1206 if not dfh and not self._inline:
1206 if not dfh and not self._inline:
1207 # addrevision switched from inline to conventional
1207 # addrevision switched from inline to conventional
1208 # reopen the index
1208 # reopen the index
1209 dfh = self.opener(self.datafile, "a")
1209 dfh = self.opener(self.datafile, "a")
1210 ifh = self.opener(self.indexfile, "a")
1210 ifh = self.opener(self.indexfile, "a")
1211 if chk != node:
1211 if chk != node:
1212 raise RevlogError(_("consistency error adding group"))
1212 raise RevlogError(_("consistency error adding group"))
1213 textlen = len(text)
1213 textlen = len(text)
1214 else:
1214 else:
1215 e = (offset_type(end, 0), cdeltalen, textlen, base,
1215 e = (offset_type(end, 0), cdeltalen, textlen, base,
1216 link, self.rev(p1), self.rev(p2), node)
1216 link, self.rev(p1), self.rev(p2), node)
1217 self.index.insert(-1, e)
1217 self.index.insert(-1, e)
1218 self.nodemap[node] = r
1218 self.nodemap[node] = r
1219 entry = self._io.packentry(e, self.node, self.version, r)
1219 entry = self._io.packentry(e, self.node, self.version, r)
1220 if self._inline:
1220 if self._inline:
1221 ifh.write(entry)
1221 ifh.write(entry)
1222 ifh.write(cdelta[0])
1222 ifh.write(cdelta[0])
1223 ifh.write(cdelta[1])
1223 ifh.write(cdelta[1])
1224 self.checkinlinesize(transaction, ifh)
1224 self.checkinlinesize(transaction, ifh)
1225 if not self._inline:
1225 if not self._inline:
1226 dfh = self.opener(self.datafile, "a")
1226 dfh = self.opener(self.datafile, "a")
1227 ifh = self.opener(self.indexfile, "a")
1227 ifh = self.opener(self.indexfile, "a")
1228 else:
1228 else:
1229 dfh.write(cdelta[0])
1229 dfh.write(cdelta[0])
1230 dfh.write(cdelta[1])
1230 dfh.write(cdelta[1])
1231 ifh.write(entry)
1231 ifh.write(entry)
1232
1232
1233 t, r, chain, prev = r, r + 1, node, node
1233 t, r, chain, prev = r, r + 1, node, node
1234 base = self.base(t)
1234 base = self.base(t)
1235 start = self.start(base)
1235 start = self.start(base)
1236 end = self.end(t)
1236 end = self.end(t)
1237
1237
1238 return node
1238 return node
1239
1239
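On the receiving side, each chunk opens with four 20-byte ids (node, p1, p2, and the changeset that supplies the linkrev); everything past byte 80 is the delta, exactly as unpacked above. A sketch with invented byte values:

import struct

chunk = b'N' * 20 + b'1' * 20 + b'2' * 20 + b'C' * 20 + b'<delta>'
node, p1, p2, cs = struct.unpack('20s20s20s20s', chunk[:80])
delta = chunk[80:]
assert (node, cs, delta) == (b'N' * 20, b'C' * 20, b'<delta>')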
1240 def strip(self, minlink):
1240 def strip(self, minlink):
1241 """truncate the revlog on the first revision with a linkrev >= minlink
1241 """truncate the revlog on the first revision with a linkrev >= minlink
1242
1242
1243 This function is called when we're stripping revision minlink and
1243 This function is called when we're stripping revision minlink and
1244 its descendants from the repository.
1244 its descendants from the repository.
1245
1245
1246 We have to remove all revisions with linkrev >= minlink, because
1246 We have to remove all revisions with linkrev >= minlink, because
1247 the equivalent changelog revisions will be renumbered after the
1247 the equivalent changelog revisions will be renumbered after the
1248 strip.
1248 strip.
1249
1249
1250 So we truncate the revlog on the first of these revisions, and
1250 So we truncate the revlog on the first of these revisions, and
1251 trust that the caller has saved the revisions that shouldn't be
1251 trust that the caller has saved the revisions that shouldn't be
1252 removed and that it'll re-add them after this truncation.
1252 removed and that it'll re-add them after this truncation.
1253 """
1253 """
1254 if self.count() == 0:
1254 if self.count() == 0:
1255 return
1255 return
1256
1256
1257 if isinstance(self.index, lazyindex):
1257 if isinstance(self.index, lazyindex):
1258 self._loadindexmap()
1258 self._loadindexmap()
1259
1259
1260 for rev in xrange(0, self.count()):
1260 for rev in xrange(0, self.count()):
1261 if self.index[rev][4] >= minlink:
1261 if self.index[rev][4] >= minlink:
1262 break
1262 break
1263 else:
1263 else:
1264 return
1264 return
1265
1265
1266 # first truncate the files on disk
1266 # first truncate the files on disk
1267 end = self.start(rev)
1267 end = self.start(rev)
1268 if not self._inline:
1268 if not self._inline:
1269 df = self.opener(self.datafile, "a")
1269 df = self.opener(self.datafile, "a")
1270 df.truncate(end)
1270 df.truncate(end)
1271 end = rev * self._io.size
1271 end = rev * self._io.size
1272 else:
1272 else:
1273 end += rev * self._io.size
1273 end += rev * self._io.size
1274
1274
1275 indexf = self.opener(self.indexfile, "a")
1275 indexf = self.opener(self.indexfile, "a")
1276 indexf.truncate(end)
1276 indexf.truncate(end)
1277
1277
1278 # then reset internal state in memory to forget those revisions
1278 # then reset internal state in memory to forget those revisions
1279 self._cache = None
1279 self._cache = None
1280 self._chunkcache = None
1280 self._chunkcache = None
1281 for x in xrange(rev, self.count()):
1281 for x in xrange(rev, self.count()):
1282 del self.nodemap[self.node(x)]
1282 del self.nodemap[self.node(x)]
1283
1283
1284 del self.index[rev:-1]
1284 del self.index[rev:-1]
1285
1285
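The truncation offsets depend on the layout: with a separate data file, the data file is cut at start(rev) and the index at rev * entrysize; an inline index is cut at start(rev) + rev * entrysize, since entries and data share one file. A sketch with invented sizes:

def truncate_offsets(rev, data_start, entrysize, inline):
    # returns (datafile_cut, indexfile_cut); datafile_cut is None when inline
    if inline:
        return None, data_start + rev * entrysize
    return data_start, rev * entrysize

assert truncate_offsets(3, 1200, 64, inline=False) == (1200, 192)
assert truncate_offsets(3, 1200, 64, inline=True) == (None, 1392)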
1286 def checksize(self):
1286 def checksize(self):
1287 expected = 0
1287 expected = 0
1288 if self.count():
1288 if self.count():
1289 expected = max(0, self.end(self.count() - 1))
1289 expected = max(0, self.end(self.count() - 1))
1290
1290
1291 try:
1291 try:
1292 f = self.opener(self.datafile)
1292 f = self.opener(self.datafile)
1293 f.seek(0, 2)
1293 f.seek(0, 2)
1294 actual = f.tell()
1294 actual = f.tell()
1295 dd = actual - expected
1295 dd = actual - expected
1296 except IOError, inst:
1296 except IOError, inst:
1297 if inst.errno != errno.ENOENT:
1297 if inst.errno != errno.ENOENT:
1298 raise
1298 raise
1299 dd = 0
1299 dd = 0
1300
1300
1301 try:
1301 try:
1302 f = self.opener(self.indexfile)
1302 f = self.opener(self.indexfile)
1303 f.seek(0, 2)
1303 f.seek(0, 2)
1304 actual = f.tell()
1304 actual = f.tell()
1305 s = self._io.size
1305 s = self._io.size
1306 i = max(0, actual / s)
1306 i = max(0, actual / s)
1307 di = actual - (i * s)
1307 di = actual - (i * s)
1308 if self._inline:
1308 if self._inline:
1309 databytes = 0
1309 databytes = 0
1310 for r in xrange(self.count()):
1310 for r in xrange(self.count()):
1311 databytes += max(0, self.length(r))
1311 databytes += max(0, self.length(r))
1312 dd = 0
1312 dd = 0
1313 di = actual - self.count() * s - databytes
1313 di = actual - self.count() * s - databytes
1314 except IOError, inst:
1314 except IOError, inst:
1315 if inst.errno != errno.ENOENT:
1315 if inst.errno != errno.ENOENT:
1316 raise
1316 raise
1317 di = 0
1317 di = 0
1318
1318
1319 return (dd, di)
1319 return (dd, di)
@@ -1,232 +1,232 @@
1 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
1 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
2 1 files updated, 0 files merged, 2 files removed, 0 files unresolved
2 1 files updated, 0 files merged, 2 files removed, 0 files unresolved
3 rev offset length base linkrev nodeid p1 p2
3 rev offset length base linkrev nodeid p1 p2
4 0 0 3 0 0 362fef284ce2 000000000000 000000000000
4 0 0 3 0 0 362fef284ce2 000000000000 000000000000
5 1 3 5 1 1 125144f7e028 362fef284ce2 000000000000
5 1 3 5 1 1 125144f7e028 362fef284ce2 000000000000
6 2 8 7 2 2 4c982badb186 125144f7e028 000000000000
6 2 8 7 2 2 4c982badb186 125144f7e028 000000000000
7 3 15 9 3 3 19b1fc555737 4c982badb186 000000000000
7 3 15 9 3 3 19b1fc555737 4c982badb186 000000000000
8 rev offset length base linkrev nodeid p1 p2
8 rev offset length base linkrev nodeid p1 p2
9 0 0 75 0 7 905359268f77 000000000000 000000000000
9 0 0 75 0 7 905359268f77 000000000000 000000000000
10 rev offset length base linkrev nodeid p1 p2
10 rev offset length base linkrev nodeid p1 p2
11 0 0 75 0 8 905359268f77 000000000000 000000000000
11 0 0 75 0 8 905359268f77 000000000000 000000000000
12 rev offset length base linkrev nodeid p1 p2
12 rev offset length base linkrev nodeid p1 p2
13 0 0 8 0 6 12ab3bcc5ea4 000000000000 000000000000
13 0 0 8 0 6 12ab3bcc5ea4 000000000000 000000000000
14 rev offset length base linkrev nodeid p1 p2
14 rev offset length base linkrev nodeid p1 p2
15 0 0 48 0 0 43eadb1d2d06 000000000000 000000000000
15 0 0 48 0 0 43eadb1d2d06 000000000000 000000000000
16 1 48 48 1 1 8b89697eba2c 43eadb1d2d06 000000000000
16 1 48 48 1 1 8b89697eba2c 43eadb1d2d06 000000000000
17 2 96 48 2 2 626a32663c2f 8b89697eba2c 000000000000
17 2 96 48 2 2 626a32663c2f 8b89697eba2c 000000000000
18 3 144 48 3 3 f54c32f13478 626a32663c2f 000000000000
18 3 144 48 3 3 f54c32f13478 626a32663c2f 000000000000
19 4 192 58 3 6 de68e904d169 626a32663c2f 000000000000
19 4 192 58 3 6 de68e904d169 626a32663c2f 000000000000
20 5 250 68 3 7 3b45cc2ab868 de68e904d169 000000000000
20 5 250 68 3 7 3b45cc2ab868 de68e904d169 000000000000
21 6 318 54 6 8 24d86153a002 f54c32f13478 000000000000
21 6 318 54 6 8 24d86153a002 f54c32f13478 000000000000
22 checking changesets
22 checking changesets
23 checking manifests
23 checking manifests
24 crosschecking files in changesets and manifests
24 crosschecking files in changesets and manifests
25 checking files
25 checking files
26 4 files, 9 changesets, 7 total revisions
26 4 files, 9 changesets, 7 total revisions
27 searching for changes
27 searching for changes
28 1 changesets found
28 1 changesets found
29 adding changesets
29 adding changesets
30 adding manifests
30 adding manifests
31 adding file changes
31 adding file changes
32 added 1 changesets with 1 changes to 1 files
32 added 1 changesets with 1 changes to 1 files
33 (run 'hg update' to get a working copy)
33 (run 'hg update' to get a working copy)
34 checking changesets
34 checking changesets
35 checking manifests
35 checking manifests
36 crosschecking files in changesets and manifests
36 crosschecking files in changesets and manifests
37 checking files
37 checking files
38 1 files, 1 changesets, 1 total revisions
38 1 files, 1 changesets, 1 total revisions
39 0:5649c9d34dd8
39 0:5649c9d34dd8
40 searching for changes
40 searching for changes
41 2 changesets found
41 2 changesets found
42 adding changesets
42 adding changesets
43 adding manifests
43 adding manifests
44 adding file changes
44 adding file changes
45 added 2 changesets with 2 changes to 1 files
45 added 2 changesets with 2 changes to 1 files
46 (run 'hg update' to get a working copy)
46 (run 'hg update' to get a working copy)
47 checking changesets
47 checking changesets
48 checking manifests
48 checking manifests
49 crosschecking files in changesets and manifests
49 crosschecking files in changesets and manifests
50 checking files
50 checking files
51 1 files, 2 changesets, 2 total revisions
51 1 files, 2 changesets, 2 total revisions
52 1:10b2180f755b
52 1:10b2180f755b
53 searching for changes
53 searching for changes
54 3 changesets found
54 3 changesets found
55 adding changesets
55 adding changesets
56 adding manifests
56 adding manifests
57 adding file changes
57 adding file changes
58 added 3 changesets with 3 changes to 1 files
58 added 3 changesets with 3 changes to 1 files
59 (run 'hg update' to get a working copy)
59 (run 'hg update' to get a working copy)
60 checking changesets
60 checking changesets
61 checking manifests
61 checking manifests
62 crosschecking files in changesets and manifests
62 crosschecking files in changesets and manifests
63 checking files
63 checking files
64 1 files, 3 changesets, 3 total revisions
64 1 files, 3 changesets, 3 total revisions
65 2:d62976ca1e50
65 2:d62976ca1e50
66 searching for changes
66 searching for changes
67 4 changesets found
67 4 changesets found
68 adding changesets
68 adding changesets
69 adding manifests
69 adding manifests
70 adding file changes
70 adding file changes
71 added 4 changesets with 4 changes to 1 files
71 added 4 changesets with 4 changes to 1 files
72 (run 'hg update' to get a working copy)
72 (run 'hg update' to get a working copy)
73 checking changesets
73 checking changesets
74 checking manifests
74 checking manifests
75 crosschecking files in changesets and manifests
75 crosschecking files in changesets and manifests
76 checking files
76 checking files
77 1 files, 4 changesets, 4 total revisions
77 1 files, 4 changesets, 4 total revisions
78 3:ac69c658229d
78 3:ac69c658229d
79 searching for changes
79 searching for changes
80 2 changesets found
80 2 changesets found
81 adding changesets
81 adding changesets
82 adding manifests
82 adding manifests
83 adding file changes
83 adding file changes
84 added 2 changesets with 2 changes to 1 files
84 added 2 changesets with 2 changes to 1 files
85 (run 'hg update' to get a working copy)
85 (run 'hg update' to get a working copy)
86 checking changesets
86 checking changesets
87 checking manifests
87 checking manifests
88 crosschecking files in changesets and manifests
88 crosschecking files in changesets and manifests
89 checking files
89 checking files
90 1 files, 2 changesets, 2 total revisions
90 1 files, 2 changesets, 2 total revisions
91 1:5f4f3ceb285e
91 1:5f4f3ceb285e
92 searching for changes
92 searching for changes
93 3 changesets found
93 3 changesets found
94 adding changesets
94 adding changesets
95 adding manifests
95 adding manifests
96 adding file changes
96 adding file changes
97 added 3 changesets with 3 changes to 1 files
97 added 3 changesets with 3 changes to 1 files
98 (run 'hg update' to get a working copy)
98 (run 'hg update' to get a working copy)
99 checking changesets
99 checking changesets
100 checking manifests
100 checking manifests
101 crosschecking files in changesets and manifests
101 crosschecking files in changesets and manifests
102 checking files
102 checking files
103 1 files, 3 changesets, 3 total revisions
103 1 files, 3 changesets, 3 total revisions
104 2:024e4e7df376
104 2:024e4e7df376
105 searching for changes
105 searching for changes
106 4 changesets found
106 4 changesets found
107 adding changesets
107 adding changesets
108 adding manifests
108 adding manifests
109 adding file changes
109 adding file changes
110 added 4 changesets with 5 changes to 2 files
110 added 4 changesets with 5 changes to 2 files
111 (run 'hg update' to get a working copy)
111 (run 'hg update' to get a working copy)
112 checking changesets
112 checking changesets
113 checking manifests
113 checking manifests
114 crosschecking files in changesets and manifests
114 crosschecking files in changesets and manifests
115 checking files
115 checking files
116 2 files, 4 changesets, 5 total revisions
116 2 files, 4 changesets, 5 total revisions
117 3:1e3f6b843bd6
117 3:1e3f6b843bd6
118 searching for changes
118 searching for changes
119 5 changesets found
119 5 changesets found
120 adding changesets
120 adding changesets
121 adding manifests
121 adding manifests
122 adding file changes
122 adding file changes
123 added 5 changesets with 6 changes to 3 files
123 added 5 changesets with 6 changes to 3 files
124 (run 'hg update' to get a working copy)
124 (run 'hg update' to get a working copy)
125 checking changesets
125 checking changesets
126 checking manifests
126 checking manifests
127 crosschecking files in changesets and manifests
127 crosschecking files in changesets and manifests
128 checking files
128 checking files
129 3 files, 5 changesets, 6 total revisions
129 3 files, 5 changesets, 6 total revisions
130 4:80fe151401c2
130 4:80fe151401c2
131 searching for changes
131 searching for changes
132 5 changesets found
132 5 changesets found
133 adding changesets
133 adding changesets
134 adding manifests
134 adding manifests
135 adding file changes
135 adding file changes
136 added 5 changesets with 5 changes to 2 files
136 added 5 changesets with 5 changes to 2 files
137 (run 'hg update' to get a working copy)
137 (run 'hg update' to get a working copy)
138 checking changesets
138 checking changesets
139 checking manifests
139 checking manifests
140 crosschecking files in changesets and manifests
140 crosschecking files in changesets and manifests
141 checking files
141 checking files
142 2 files, 5 changesets, 5 total revisions
142 2 files, 5 changesets, 5 total revisions
143 4:836ac62537ab
143 4:836ac62537ab
144 pulling from ../test-7
144 pulling from ../test-7
145 searching for changes
145 searching for changes
146 adding changesets
146 adding changesets
147 adding manifests
147 adding manifests
148 adding file changes
148 adding file changes
149 added 4 changesets with 2 changes to 3 files (+1 heads)
149 added 4 changesets with 2 changes to 3 files (+1 heads)
150 (run 'hg heads' to see heads, 'hg merge' to merge)
150 (run 'hg heads' to see heads, 'hg merge' to merge)
151 checking changesets
151 checking changesets
152 checking manifests
152 checking manifests
153 crosschecking files in changesets and manifests
153 crosschecking files in changesets and manifests
154 checking files
154 checking files
155 4 files, 9 changesets, 7 total revisions
155 4 files, 9 changesets, 7 total revisions
156 rolling back last transaction
156 rolling back last transaction
157 % should fail
157 % should fail
158 abort: --base is incompatible with specifiying a destination
158 abort: --base is incompatible with specifiying a destination
159 abort: repository default-push not found!
159 abort: repository default-push not found!
160 2 changesets found
160 2 changesets found
161 4 changesets found
161 4 changesets found
162 6 changesets found
162 6 changesets found
163 1 changesets found
163 1 changesets found
164 1 changesets found
164 1 changesets found
165 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
165 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
166 % 2
166 % 2
167 2:d62976ca1e50
167 2:d62976ca1e50
168 adding changesets
168 adding changesets
169 transaction abort!
169 transaction abort!
170 rollback completed
170 rollback completed
171 abort: unknown parent ac69c658229d!
171 abort: 00changelog.i@ac69c658229d: unknown parent!
172 % 2
172 % 2
173 2:d62976ca1e50
173 2:d62976ca1e50
174 adding changesets
174 adding changesets
175 adding manifests
175 adding manifests
176 adding file changes
176 adding file changes
177 added 6 changesets with 4 changes to 4 files (+1 heads)
177 added 6 changesets with 4 changes to 4 files (+1 heads)
178 (run 'hg heads' to see heads, 'hg merge' to merge)
178 (run 'hg heads' to see heads, 'hg merge' to merge)
179 % 8
179 % 8
180 8:836ac62537ab
180 8:836ac62537ab
181 checking changesets
181 checking changesets
182 checking manifests
182 checking manifests
183 crosschecking files in changesets and manifests
183 crosschecking files in changesets and manifests
184 checking files
184 checking files
185 4 files, 9 changesets, 7 total revisions
185 4 files, 9 changesets, 7 total revisions
186 rolling back last transaction
186 rolling back last transaction
187 % 2
187 % 2
188 2:d62976ca1e50
188 2:d62976ca1e50
189 adding changesets
189 adding changesets
190 adding manifests
190 adding manifests
191 adding file changes
191 adding file changes
192 added 2 changesets with 2 changes to 2 files
192 added 2 changesets with 2 changes to 2 files
193 (run 'hg update' to get a working copy)
193 (run 'hg update' to get a working copy)
194 % 4
194 % 4
195 4:836ac62537ab
195 4:836ac62537ab
196 checking changesets
196 checking changesets
197 checking manifests
197 checking manifests
198 crosschecking files in changesets and manifests
198 crosschecking files in changesets and manifests
199 checking files
199 checking files
200 2 files, 5 changesets, 5 total revisions
200 2 files, 5 changesets, 5 total revisions
201 rolling back last transaction
201 rolling back last transaction
202 adding changesets
202 adding changesets
203 adding manifests
203 adding manifests
204 adding file changes
204 adding file changes
205 added 4 changesets with 3 changes to 3 files (+1 heads)
205 added 4 changesets with 3 changes to 3 files (+1 heads)
206 (run 'hg heads' to see heads, 'hg merge' to merge)
206 (run 'hg heads' to see heads, 'hg merge' to merge)
207 % 6
207 % 6
208 6:80fe151401c2
208 6:80fe151401c2
209 checking changesets
209 checking changesets
210 checking manifests
210 checking manifests
211 crosschecking files in changesets and manifests
211 crosschecking files in changesets and manifests
212 checking files
212 checking files
213 3 files, 7 changesets, 6 total revisions
213 3 files, 7 changesets, 6 total revisions
214 warning: detected divergent renames of afile to:
214 warning: detected divergent renames of afile to:
215 anotherfile
215 anotherfile
216 adifferentfile
216 adifferentfile
217 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
217 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
218 (branch merge, don't forget to commit)
218 (branch merge, don't forget to commit)
219 7 changesets found
219 7 changesets found
220 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
220 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
221 adding changesets
221 adding changesets
222 adding manifests
222 adding manifests
223 adding file changes
223 adding file changes
224 added 7 changesets with 4 changes to 4 files
224 added 7 changesets with 4 changes to 4 files
225 (run 'hg update' to get a working copy)
225 (run 'hg update' to get a working copy)
226 % 9
226 % 9
227 9:607fe5912aad
227 9:607fe5912aad
228 checking changesets
228 checking changesets
229 checking manifests
229 checking manifests
230 crosschecking files in changesets and manifests
230 crosschecking files in changesets and manifests
231 checking files
231 checking files
232 4 files, 10 changesets, 7 total revisions
232 4 files, 10 changesets, 7 total revisions
@@ -1,22 +1,22 @@
1 diff -r acd8075edac9 b
1 diff -r acd8075edac9 b
2 --- /dev/null
2 --- /dev/null
3 +++ b/b
3 +++ b/b
4 @@ -0,0 +1,1 @@
4 @@ -0,0 +1,1 @@
5 +123
5 +123
6 diff -r acd8075edac9 b
6 diff -r acd8075edac9 b
7 --- /dev/null
7 --- /dev/null
8 +++ b/b
8 +++ b/b
9 @@ -0,0 +1,1 @@
9 @@ -0,0 +1,1 @@
10 +123
10 +123
11 diff -r acd8075edac9 a
11 diff -r acd8075edac9 a
12 --- a/a
12 --- a/a
13 +++ b/a
13 +++ b/a
14 @@ -0,0 +1,1 @@
14 @@ -0,0 +1,1 @@
15 +foo
15 +foo
16 diff -r acd8075edac9 b
16 diff -r acd8075edac9 b
17 --- /dev/null
17 --- /dev/null
18 +++ b/b
18 +++ b/b
19 @@ -0,0 +1,1 @@
19 @@ -0,0 +1,1 @@
20 +123
20 +123
21 abort: Ambiguous identifier!
21 abort: 00changelog.i@: ambiguous identifier!
22 abort: Ambiguous identifier!
22 abort: 00changelog.i@: ambiguous identifier!
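The rewritten abort lines above are the visible effect of this changeset: a failed lookup now names both the revlog index being searched and the identifier that failed, in the shape <index>@<id>: <message>. Below is a minimal standalone sketch of an exception with that behavior; the class name RevlogLookupError and the 12-digit abbreviation rule are assumptions for illustration (the abbreviated width matches the hashes in the expected output), not a copy of the real revlog code.

    import binascii

    class RevlogLookupError(Exception):
        # Illustrative stand-in: carry the id that failed and the
        # index file it was looked up in, and format both for display.
        def __init__(self, name, index, message):
            self.name = name
            # A 20-byte binary node is shown as its short hex prefix;
            # string ids (including the empty one) are shown verbatim.
            if isinstance(name, bytes) and len(name) == 20:
                name = binascii.hexlify(name)[:12].decode('ascii')
            Exception.__init__(self, '%s@%s: %s' % (index, name, message))

    # An empty revision string is an ambiguous prefix of every node, so
    # something like "hg log -r ''" surfaces as the message expected above:
    print('abort: %s!' % RevlogLookupError('', '00changelog.i', 'ambiguous identifier'))
    # prints: abort: 00changelog.i@: ambiguous identifier!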
@@ -1,50 +1,50 @@
 adding bar
 adding foo
 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
 % start imerge
 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
 (branch merge, don't forget to commit)
 U foo
 foo
 bar
 bar
 bar
 % status -v
 merging e6da46716401 and 30d266f502e7
 U foo (foo2)
 % next
 foo
 % merge next
 merging foo and foo2
 all conflicts resolved
 % unresolve
 % merge foo
 merging foo and foo2
 all conflicts resolved
 % save
 % load
 2 files updated, 0 files merged, 1 files removed, 0 files unresolved
 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
 (branch merge, don't forget to commit)
 R foo
 all conflicts resolved
 foo
 changeset: 3:fa9a6defdcaf
 tag: tip
 parent: 2:e6da46716401
 parent: 1:30d266f502e7
 user: test
 date: Thu Jan 01 00:00:03 1970 +0000
 files: foo foo2
 description:
 merged


 % nothing to merge -- tip
 abort: there is nothing to merge
 2 files updated, 0 files merged, 1 files removed, 0 files unresolved
 % nothing to merge
 abort: there is nothing to merge - use "hg update" instead
 % load unknown parent
-abort: merge parent e6da4671640124fe5d3d70d2f54441d6cd76ae35 not in repository
+abort: merge parent e6da46716401 not in repository
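The imerge hunk ends with the knock-on effect in a caller: a saved merge parent that is no longer in the repository is now reported by its short node rather than the full 40-digit hex. A hypothetical, self-contained sketch of deriving that message from the failing node (function name and structure are illustrative, not imerge's actual code):

    import binascii

    def missing_parent_message(node):
        # Reuse the same display rule: a 20-byte binary node becomes
        # its 12-digit short hex form; anything else is shown as-is.
        name = node
        if isinstance(name, bytes) and len(name) == 20:
            name = binascii.hexlify(name)[:12].decode('ascii')
        return 'merge parent %s not in repository' % name

    node = binascii.unhexlify('e6da4671640124fe5d3d70d2f54441d6cd76ae35')
    print('abort: ' + missing_parent_message(node))
    # prints: abort: merge parent e6da46716401 not in repository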
@@ -1,223 +1,223 @@
 adding a
 changeset: 0:8580ff50825a
 user: test
 date: Thu Jan 01 00:00:01 1970 +0000
 summary: a

 % -f, directory
 abort: can only follow copies/renames for explicit file names
 % -f, but no args
 changeset: 4:b30c444c7c84
 tag: tip
 user: test
 date: Thu Jan 01 00:00:05 1970 +0000
 summary: e

 changeset: 3:16b60bf3f99a
 user: test
 date: Thu Jan 01 00:00:04 1970 +0000
 summary: d

 changeset: 2:21fba396af4c
 user: test
 date: Thu Jan 01 00:00:03 1970 +0000
 summary: c

 changeset: 1:c0296dabce9b
 user: test
 date: Thu Jan 01 00:00:02 1970 +0000
 summary: b

 changeset: 0:8580ff50825a
 user: test
 date: Thu Jan 01 00:00:01 1970 +0000
 summary: a

 % one rename
 changeset: 0:8580ff50825a
 user: test
 date: Thu Jan 01 00:00:01 1970 +0000
 files: a
 description:
 a


 % many renames
 changeset: 4:b30c444c7c84
 tag: tip
 user: test
 date: Thu Jan 01 00:00:05 1970 +0000
 files: dir/b e
 description:
 e


 changeset: 2:21fba396af4c
 user: test
 date: Thu Jan 01 00:00:03 1970 +0000
 files: b dir/b
 description:
 c


 changeset: 1:c0296dabce9b
 user: test
 date: Thu Jan 01 00:00:02 1970 +0000
 files: b
 description:
 b


 changeset: 0:8580ff50825a
 user: test
 date: Thu Jan 01 00:00:01 1970 +0000
 files: a
 description:
 a


 % log copies
 4 e (dir/b)
 3 b (a)
 2 dir/b (b)
 1 b (a)
 0
 % log copies, non-linear manifest
 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
 adding foo
 5 e (dir/b)
 % log copies, execute bit set
 6
 % log -p d
 changeset: 3:16b60bf3f99a
 user: test
 date: Thu Jan 01 00:00:04 1970 +0000
 files: a b d
 description:
 d


 diff -r 21fba396af4c -r 16b60bf3f99a d
 --- /dev/null Thu Jan 01 00:00:00 1970 +0000
 +++ b/d Thu Jan 01 00:00:04 1970 +0000
 @@ -0,0 +1,1 @@
 +a

 adding base
 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
 adding b1
 % log -f
 changeset: 3:e62f78d544b4
 tag: tip
 parent: 1:3d5bf5654eda
 user: test
 date: Thu Jan 01 00:00:01 1970 +0000
 summary: b1

 changeset: 1:3d5bf5654eda
 user: test
 date: Thu Jan 01 00:00:01 1970 +0000
 summary: r1

 changeset: 0:67e992f2c4f3
 user: test
 date: Thu Jan 01 00:00:01 1970 +0000
 summary: base

 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
 adding b2
 % log -f -r 1:tip
 changeset: 1:3d5bf5654eda
 user: test
 date: Thu Jan 01 00:00:01 1970 +0000
 summary: r1

 changeset: 2:60c670bf5b30
 user: test
 date: Thu Jan 01 00:00:01 1970 +0000
 summary: r2

 changeset: 3:e62f78d544b4
 parent: 1:3d5bf5654eda
 user: test
 date: Thu Jan 01 00:00:01 1970 +0000
 summary: b1

 2 files updated, 0 files merged, 1 files removed, 0 files unresolved
 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
 (branch merge, don't forget to commit)
 % log -r . with two parents
 warning: working directory has two parents, tag '.' uses the first
 changeset: 3:e62f78d544b4
 parent: 1:3d5bf5654eda
 user: test
 date: Thu Jan 01 00:00:01 1970 +0000
 summary: b1

 % log -r . with one parent
 changeset: 5:302e9dd6890d
 tag: tip
 parent: 3:e62f78d544b4
 parent: 4:ddb82e70d1a1
 user: test
 date: Thu Jan 01 00:00:01 1970 +0000
 summary: m12

 % log --follow-first
 changeset: 6:2404bbcab562
 tag: tip
 user: test
 date: Thu Jan 01 00:00:01 1970 +0000
 summary: b1.1

 changeset: 5:302e9dd6890d
 parent: 3:e62f78d544b4
 parent: 4:ddb82e70d1a1
 user: test
 date: Thu Jan 01 00:00:01 1970 +0000
 summary: m12

 changeset: 3:e62f78d544b4
 parent: 1:3d5bf5654eda
 user: test
 date: Thu Jan 01 00:00:01 1970 +0000
 summary: b1

 changeset: 1:3d5bf5654eda
 user: test
 date: Thu Jan 01 00:00:01 1970 +0000
 summary: r1

 changeset: 0:67e992f2c4f3
 user: test
 date: Thu Jan 01 00:00:01 1970 +0000
 summary: base

 % log -P 2
 changeset: 6:2404bbcab562
 tag: tip
 user: test
 date: Thu Jan 01 00:00:01 1970 +0000
 summary: b1.1

 changeset: 5:302e9dd6890d
 parent: 3:e62f78d544b4
 parent: 4:ddb82e70d1a1
 user: test
 date: Thu Jan 01 00:00:01 1970 +0000
 summary: m12

 changeset: 4:ddb82e70d1a1
 parent: 0:67e992f2c4f3
 user: test
 date: Thu Jan 01 00:00:01 1970 +0000
 summary: b2

 changeset: 3:e62f78d544b4
 parent: 1:3d5bf5654eda
 user: test
 date: Thu Jan 01 00:00:01 1970 +0000
 summary: b1

 % log -r ""
-abort: Ambiguous identifier!
+abort: 00changelog.i@: ambiguous identifier!