##// END OF EJS Templates
Issue a warning if "-r ." is used with two working directory parents....
Thomas Arendsen Hein -
r4510:e0bc2c57 default
parent child Browse files
Show More
@@ -1,1950 +1,1953 b''
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms
5 # This software may be used and distributed according to the terms
6 # of the GNU General Public License, incorporated herein by reference.
6 # of the GNU General Public License, incorporated herein by reference.
7
7
8 from node import *
8 from node import *
9 from i18n import _
9 from i18n import _
10 import repo, changegroup
10 import repo, changegroup
11 import changelog, dirstate, filelog, manifest, context
11 import changelog, dirstate, filelog, manifest, context
12 import re, lock, transaction, tempfile, stat, mdiff, errno, ui
12 import re, lock, transaction, tempfile, stat, mdiff, errno, ui
13 import os, revlog, time, util
13 import os, revlog, time, util
14
14
15 class localrepository(repo.repository):
15 class localrepository(repo.repository):
16 capabilities = ('lookup', 'changegroupsubset')
16 capabilities = ('lookup', 'changegroupsubset')
17 supported = ('revlogv1', 'store')
17 supported = ('revlogv1', 'store')
18
18
19 def __del__(self):
19 def __del__(self):
20 self.transhandle = None
20 self.transhandle = None
21 def __init__(self, parentui, path=None, create=0):
21 def __init__(self, parentui, path=None, create=0):
22 repo.repository.__init__(self)
22 repo.repository.__init__(self)
23 if not path:
23 if not path:
24 p = os.getcwd()
24 p = os.getcwd()
25 while not os.path.isdir(os.path.join(p, ".hg")):
25 while not os.path.isdir(os.path.join(p, ".hg")):
26 oldp = p
26 oldp = p
27 p = os.path.dirname(p)
27 p = os.path.dirname(p)
28 if p == oldp:
28 if p == oldp:
29 raise repo.RepoError(_("There is no Mercurial repository"
29 raise repo.RepoError(_("There is no Mercurial repository"
30 " here (.hg not found)"))
30 " here (.hg not found)"))
31 path = p
31 path = p
32
32
33 self.root = os.path.realpath(path)
33 self.root = os.path.realpath(path)
34 self.path = os.path.join(self.root, ".hg")
34 self.path = os.path.join(self.root, ".hg")
35 self.origroot = path
35 self.origroot = path
36 self.opener = util.opener(self.path)
36 self.opener = util.opener(self.path)
37 self.wopener = util.opener(self.root)
37 self.wopener = util.opener(self.root)
38
38
39 if not os.path.isdir(self.path):
39 if not os.path.isdir(self.path):
40 if create:
40 if create:
41 if not os.path.exists(path):
41 if not os.path.exists(path):
42 os.mkdir(path)
42 os.mkdir(path)
43 os.mkdir(self.path)
43 os.mkdir(self.path)
44 requirements = ["revlogv1"]
44 requirements = ["revlogv1"]
45 if parentui.configbool('format', 'usestore', True):
45 if parentui.configbool('format', 'usestore', True):
46 os.mkdir(os.path.join(self.path, "store"))
46 os.mkdir(os.path.join(self.path, "store"))
47 requirements.append("store")
47 requirements.append("store")
48 # create an invalid changelog
48 # create an invalid changelog
49 self.opener("00changelog.i", "a").write(
49 self.opener("00changelog.i", "a").write(
50 '\0\0\0\2' # represents revlogv2
50 '\0\0\0\2' # represents revlogv2
51 ' dummy changelog to prevent using the old repo layout'
51 ' dummy changelog to prevent using the old repo layout'
52 )
52 )
53 reqfile = self.opener("requires", "w")
53 reqfile = self.opener("requires", "w")
54 for r in requirements:
54 for r in requirements:
55 reqfile.write("%s\n" % r)
55 reqfile.write("%s\n" % r)
56 reqfile.close()
56 reqfile.close()
57 else:
57 else:
58 raise repo.RepoError(_("repository %s not found") % path)
58 raise repo.RepoError(_("repository %s not found") % path)
59 elif create:
59 elif create:
60 raise repo.RepoError(_("repository %s already exists") % path)
60 raise repo.RepoError(_("repository %s already exists") % path)
61 else:
61 else:
62 # find requirements
62 # find requirements
63 try:
63 try:
64 requirements = self.opener("requires").read().splitlines()
64 requirements = self.opener("requires").read().splitlines()
65 except IOError, inst:
65 except IOError, inst:
66 if inst.errno != errno.ENOENT:
66 if inst.errno != errno.ENOENT:
67 raise
67 raise
68 requirements = []
68 requirements = []
69 # check them
69 # check them
70 for r in requirements:
70 for r in requirements:
71 if r not in self.supported:
71 if r not in self.supported:
72 raise repo.RepoError(_("requirement '%s' not supported") % r)
72 raise repo.RepoError(_("requirement '%s' not supported") % r)
73
73
74 # setup store
74 # setup store
75 if "store" in requirements:
75 if "store" in requirements:
76 self.encodefn = util.encodefilename
76 self.encodefn = util.encodefilename
77 self.decodefn = util.decodefilename
77 self.decodefn = util.decodefilename
78 self.spath = os.path.join(self.path, "store")
78 self.spath = os.path.join(self.path, "store")
79 else:
79 else:
80 self.encodefn = lambda x: x
80 self.encodefn = lambda x: x
81 self.decodefn = lambda x: x
81 self.decodefn = lambda x: x
82 self.spath = self.path
82 self.spath = self.path
83 self.sopener = util.encodedopener(util.opener(self.spath), self.encodefn)
83 self.sopener = util.encodedopener(util.opener(self.spath), self.encodefn)
84
84
85 self.ui = ui.ui(parentui=parentui)
85 self.ui = ui.ui(parentui=parentui)
86 try:
86 try:
87 self.ui.readconfig(self.join("hgrc"), self.root)
87 self.ui.readconfig(self.join("hgrc"), self.root)
88 except IOError:
88 except IOError:
89 pass
89 pass
90
90
91 self.changelog = changelog.changelog(self.sopener)
91 self.changelog = changelog.changelog(self.sopener)
92 self.sopener.defversion = self.changelog.version
92 self.sopener.defversion = self.changelog.version
93 self.manifest = manifest.manifest(self.sopener)
93 self.manifest = manifest.manifest(self.sopener)
94
94
95 fallback = self.ui.config('ui', 'fallbackencoding')
95 fallback = self.ui.config('ui', 'fallbackencoding')
96 if fallback:
96 if fallback:
97 util._fallbackencoding = fallback
97 util._fallbackencoding = fallback
98
98
99 self.tagscache = None
99 self.tagscache = None
100 self.branchcache = None
100 self.branchcache = None
101 self.nodetagscache = None
101 self.nodetagscache = None
102 self.filterpats = {}
102 self.filterpats = {}
103 self.transhandle = None
103 self.transhandle = None
104
104
105 self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
105 self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
106
106
107 def url(self):
107 def url(self):
108 return 'file:' + self.root
108 return 'file:' + self.root
109
109
110 def hook(self, name, throw=False, **args):
110 def hook(self, name, throw=False, **args):
111 def callhook(hname, funcname):
111 def callhook(hname, funcname):
112 '''call python hook. hook is callable object, looked up as
112 '''call python hook. hook is callable object, looked up as
113 name in python module. if callable returns "true", hook
113 name in python module. if callable returns "true", hook
114 fails, else passes. if hook raises exception, treated as
114 fails, else passes. if hook raises exception, treated as
115 hook failure. exception propagates if throw is "true".
115 hook failure. exception propagates if throw is "true".
116
116
117 reason for "true" meaning "hook failed" is so that
117 reason for "true" meaning "hook failed" is so that
118 unmodified commands (e.g. mercurial.commands.update) can
118 unmodified commands (e.g. mercurial.commands.update) can
119 be run as hooks without wrappers to convert return values.'''
119 be run as hooks without wrappers to convert return values.'''
120
120
121 self.ui.note(_("calling hook %s: %s\n") % (hname, funcname))
121 self.ui.note(_("calling hook %s: %s\n") % (hname, funcname))
122 obj = funcname
122 obj = funcname
123 if not callable(obj):
123 if not callable(obj):
124 d = funcname.rfind('.')
124 d = funcname.rfind('.')
125 if d == -1:
125 if d == -1:
126 raise util.Abort(_('%s hook is invalid ("%s" not in '
126 raise util.Abort(_('%s hook is invalid ("%s" not in '
127 'a module)') % (hname, funcname))
127 'a module)') % (hname, funcname))
128 modname = funcname[:d]
128 modname = funcname[:d]
129 try:
129 try:
130 obj = __import__(modname)
130 obj = __import__(modname)
131 except ImportError:
131 except ImportError:
132 try:
132 try:
133 # extensions are loaded with hgext_ prefix
133 # extensions are loaded with hgext_ prefix
134 obj = __import__("hgext_%s" % modname)
134 obj = __import__("hgext_%s" % modname)
135 except ImportError:
135 except ImportError:
136 raise util.Abort(_('%s hook is invalid '
136 raise util.Abort(_('%s hook is invalid '
137 '(import of "%s" failed)') %
137 '(import of "%s" failed)') %
138 (hname, modname))
138 (hname, modname))
139 try:
139 try:
140 for p in funcname.split('.')[1:]:
140 for p in funcname.split('.')[1:]:
141 obj = getattr(obj, p)
141 obj = getattr(obj, p)
142 except AttributeError, err:
142 except AttributeError, err:
143 raise util.Abort(_('%s hook is invalid '
143 raise util.Abort(_('%s hook is invalid '
144 '("%s" is not defined)') %
144 '("%s" is not defined)') %
145 (hname, funcname))
145 (hname, funcname))
146 if not callable(obj):
146 if not callable(obj):
147 raise util.Abort(_('%s hook is invalid '
147 raise util.Abort(_('%s hook is invalid '
148 '("%s" is not callable)') %
148 '("%s" is not callable)') %
149 (hname, funcname))
149 (hname, funcname))
150 try:
150 try:
151 r = obj(ui=self.ui, repo=self, hooktype=name, **args)
151 r = obj(ui=self.ui, repo=self, hooktype=name, **args)
152 except (KeyboardInterrupt, util.SignalInterrupt):
152 except (KeyboardInterrupt, util.SignalInterrupt):
153 raise
153 raise
154 except Exception, exc:
154 except Exception, exc:
155 if isinstance(exc, util.Abort):
155 if isinstance(exc, util.Abort):
156 self.ui.warn(_('error: %s hook failed: %s\n') %
156 self.ui.warn(_('error: %s hook failed: %s\n') %
157 (hname, exc.args[0]))
157 (hname, exc.args[0]))
158 else:
158 else:
159 self.ui.warn(_('error: %s hook raised an exception: '
159 self.ui.warn(_('error: %s hook raised an exception: '
160 '%s\n') % (hname, exc))
160 '%s\n') % (hname, exc))
161 if throw:
161 if throw:
162 raise
162 raise
163 self.ui.print_exc()
163 self.ui.print_exc()
164 return True
164 return True
165 if r:
165 if r:
166 if throw:
166 if throw:
167 raise util.Abort(_('%s hook failed') % hname)
167 raise util.Abort(_('%s hook failed') % hname)
168 self.ui.warn(_('warning: %s hook failed\n') % hname)
168 self.ui.warn(_('warning: %s hook failed\n') % hname)
169 return r
169 return r
170
170
171 def runhook(name, cmd):
171 def runhook(name, cmd):
172 self.ui.note(_("running hook %s: %s\n") % (name, cmd))
172 self.ui.note(_("running hook %s: %s\n") % (name, cmd))
173 env = dict([('HG_' + k.upper(), v) for k, v in args.iteritems()])
173 env = dict([('HG_' + k.upper(), v) for k, v in args.iteritems()])
174 r = util.system(cmd, environ=env, cwd=self.root)
174 r = util.system(cmd, environ=env, cwd=self.root)
175 if r:
175 if r:
176 desc, r = util.explain_exit(r)
176 desc, r = util.explain_exit(r)
177 if throw:
177 if throw:
178 raise util.Abort(_('%s hook %s') % (name, desc))
178 raise util.Abort(_('%s hook %s') % (name, desc))
179 self.ui.warn(_('warning: %s hook %s\n') % (name, desc))
179 self.ui.warn(_('warning: %s hook %s\n') % (name, desc))
180 return r
180 return r
181
181
182 r = False
182 r = False
183 hooks = [(hname, cmd) for hname, cmd in self.ui.configitems("hooks")
183 hooks = [(hname, cmd) for hname, cmd in self.ui.configitems("hooks")
184 if hname.split(".", 1)[0] == name and cmd]
184 if hname.split(".", 1)[0] == name and cmd]
185 hooks.sort()
185 hooks.sort()
186 for hname, cmd in hooks:
186 for hname, cmd in hooks:
187 if callable(cmd):
187 if callable(cmd):
188 r = callhook(hname, cmd) or r
188 r = callhook(hname, cmd) or r
189 elif cmd.startswith('python:'):
189 elif cmd.startswith('python:'):
190 r = callhook(hname, cmd[7:].strip()) or r
190 r = callhook(hname, cmd[7:].strip()) or r
191 else:
191 else:
192 r = runhook(hname, cmd) or r
192 r = runhook(hname, cmd) or r
193 return r
193 return r
194
194
195 tag_disallowed = ':\r\n'
195 tag_disallowed = ':\r\n'
196
196
197 def _tag(self, name, node, message, local, user, date, parent=None):
197 def _tag(self, name, node, message, local, user, date, parent=None):
198 use_dirstate = parent is None
198 use_dirstate = parent is None
199
199
200 for c in self.tag_disallowed:
200 for c in self.tag_disallowed:
201 if c in name:
201 if c in name:
202 raise util.Abort(_('%r cannot be used in a tag name') % c)
202 raise util.Abort(_('%r cannot be used in a tag name') % c)
203
203
204 self.hook('pretag', throw=True, node=hex(node), tag=name, local=local)
204 self.hook('pretag', throw=True, node=hex(node), tag=name, local=local)
205
205
206 if local:
206 if local:
207 # local tags are stored in the current charset
207 # local tags are stored in the current charset
208 self.opener('localtags', 'a').write('%s %s\n' % (hex(node), name))
208 self.opener('localtags', 'a').write('%s %s\n' % (hex(node), name))
209 self.hook('tag', node=hex(node), tag=name, local=local)
209 self.hook('tag', node=hex(node), tag=name, local=local)
210 return
210 return
211
211
212 # committed tags are stored in UTF-8
212 # committed tags are stored in UTF-8
213 line = '%s %s\n' % (hex(node), util.fromlocal(name))
213 line = '%s %s\n' % (hex(node), util.fromlocal(name))
214 if use_dirstate:
214 if use_dirstate:
215 self.wfile('.hgtags', 'ab').write(line)
215 self.wfile('.hgtags', 'ab').write(line)
216 else:
216 else:
217 ntags = self.filectx('.hgtags', parent).data()
217 ntags = self.filectx('.hgtags', parent).data()
218 self.wfile('.hgtags', 'ab').write(ntags + line)
218 self.wfile('.hgtags', 'ab').write(ntags + line)
219 if use_dirstate and self.dirstate.state('.hgtags') == '?':
219 if use_dirstate and self.dirstate.state('.hgtags') == '?':
220 self.add(['.hgtags'])
220 self.add(['.hgtags'])
221
221
222 tagnode = self.commit(['.hgtags'], message, user, date, p1=parent)
222 tagnode = self.commit(['.hgtags'], message, user, date, p1=parent)
223
223
224 self.hook('tag', node=hex(node), tag=name, local=local)
224 self.hook('tag', node=hex(node), tag=name, local=local)
225
225
226 return tagnode
226 return tagnode
227
227
228 def tag(self, name, node, message, local, user, date):
228 def tag(self, name, node, message, local, user, date):
229 '''tag a revision with a symbolic name.
229 '''tag a revision with a symbolic name.
230
230
231 if local is True, the tag is stored in a per-repository file.
231 if local is True, the tag is stored in a per-repository file.
232 otherwise, it is stored in the .hgtags file, and a new
232 otherwise, it is stored in the .hgtags file, and a new
233 changeset is committed with the change.
233 changeset is committed with the change.
234
234
235 keyword arguments:
235 keyword arguments:
236
236
237 local: whether to store tag in non-version-controlled file
237 local: whether to store tag in non-version-controlled file
238 (default False)
238 (default False)
239
239
240 message: commit message to use if committing
240 message: commit message to use if committing
241
241
242 user: name of user to use if committing
242 user: name of user to use if committing
243
243
244 date: date tuple to use if committing'''
244 date: date tuple to use if committing'''
245
245
246 for x in self.status()[:5]:
246 for x in self.status()[:5]:
247 if '.hgtags' in x:
247 if '.hgtags' in x:
248 raise util.Abort(_('working copy of .hgtags is changed '
248 raise util.Abort(_('working copy of .hgtags is changed '
249 '(please commit .hgtags manually)'))
249 '(please commit .hgtags manually)'))
250
250
251
251
252 self._tag(name, node, message, local, user, date)
252 self._tag(name, node, message, local, user, date)
253
253
254 def tags(self):
254 def tags(self):
255 '''return a mapping of tag to node'''
255 '''return a mapping of tag to node'''
256 if self.tagscache:
256 if self.tagscache:
257 return self.tagscache
257 return self.tagscache
258
258
259 globaltags = {}
259 globaltags = {}
260
260
261 def readtags(lines, fn):
261 def readtags(lines, fn):
262 filetags = {}
262 filetags = {}
263 count = 0
263 count = 0
264
264
265 def warn(msg):
265 def warn(msg):
266 self.ui.warn(_("%s, line %s: %s\n") % (fn, count, msg))
266 self.ui.warn(_("%s, line %s: %s\n") % (fn, count, msg))
267
267
268 for l in lines:
268 for l in lines:
269 count += 1
269 count += 1
270 if not l:
270 if not l:
271 continue
271 continue
272 s = l.split(" ", 1)
272 s = l.split(" ", 1)
273 if len(s) != 2:
273 if len(s) != 2:
274 warn(_("cannot parse entry"))
274 warn(_("cannot parse entry"))
275 continue
275 continue
276 node, key = s
276 node, key = s
277 key = util.tolocal(key.strip()) # stored in UTF-8
277 key = util.tolocal(key.strip()) # stored in UTF-8
278 try:
278 try:
279 bin_n = bin(node)
279 bin_n = bin(node)
280 except TypeError:
280 except TypeError:
281 warn(_("node '%s' is not well formed") % node)
281 warn(_("node '%s' is not well formed") % node)
282 continue
282 continue
283 if bin_n not in self.changelog.nodemap:
283 if bin_n not in self.changelog.nodemap:
284 warn(_("tag '%s' refers to unknown node") % key)
284 warn(_("tag '%s' refers to unknown node") % key)
285 continue
285 continue
286
286
287 h = []
287 h = []
288 if key in filetags:
288 if key in filetags:
289 n, h = filetags[key]
289 n, h = filetags[key]
290 h.append(n)
290 h.append(n)
291 filetags[key] = (bin_n, h)
291 filetags[key] = (bin_n, h)
292
292
293 for k,nh in filetags.items():
293 for k,nh in filetags.items():
294 if k not in globaltags:
294 if k not in globaltags:
295 globaltags[k] = nh
295 globaltags[k] = nh
296 continue
296 continue
297 # we prefer the global tag if:
297 # we prefer the global tag if:
298 # it supercedes us OR
298 # it supercedes us OR
299 # mutual supercedes and it has a higher rank
299 # mutual supercedes and it has a higher rank
300 # otherwise we win because we're tip-most
300 # otherwise we win because we're tip-most
301 an, ah = nh
301 an, ah = nh
302 bn, bh = globaltags[k]
302 bn, bh = globaltags[k]
303 if bn != an and an in bh and \
303 if bn != an and an in bh and \
304 (bn not in ah or len(bh) > len(ah)):
304 (bn not in ah or len(bh) > len(ah)):
305 an = bn
305 an = bn
306 ah.extend([n for n in bh if n not in ah])
306 ah.extend([n for n in bh if n not in ah])
307 globaltags[k] = an, ah
307 globaltags[k] = an, ah
308
308
309 # read the tags file from each head, ending with the tip
309 # read the tags file from each head, ending with the tip
310 f = None
310 f = None
311 for rev, node, fnode in self._hgtagsnodes():
311 for rev, node, fnode in self._hgtagsnodes():
312 f = (f and f.filectx(fnode) or
312 f = (f and f.filectx(fnode) or
313 self.filectx('.hgtags', fileid=fnode))
313 self.filectx('.hgtags', fileid=fnode))
314 readtags(f.data().splitlines(), f)
314 readtags(f.data().splitlines(), f)
315
315
316 try:
316 try:
317 data = util.fromlocal(self.opener("localtags").read())
317 data = util.fromlocal(self.opener("localtags").read())
318 # localtags are stored in the local character set
318 # localtags are stored in the local character set
319 # while the internal tag table is stored in UTF-8
319 # while the internal tag table is stored in UTF-8
320 readtags(data.splitlines(), "localtags")
320 readtags(data.splitlines(), "localtags")
321 except IOError:
321 except IOError:
322 pass
322 pass
323
323
324 self.tagscache = {}
324 self.tagscache = {}
325 for k,nh in globaltags.items():
325 for k,nh in globaltags.items():
326 n = nh[0]
326 n = nh[0]
327 if n != nullid:
327 if n != nullid:
328 self.tagscache[k] = n
328 self.tagscache[k] = n
329 self.tagscache['tip'] = self.changelog.tip()
329 self.tagscache['tip'] = self.changelog.tip()
330
330
331 return self.tagscache
331 return self.tagscache
332
332
333 def _hgtagsnodes(self):
333 def _hgtagsnodes(self):
334 heads = self.heads()
334 heads = self.heads()
335 heads.reverse()
335 heads.reverse()
336 last = {}
336 last = {}
337 ret = []
337 ret = []
338 for node in heads:
338 for node in heads:
339 c = self.changectx(node)
339 c = self.changectx(node)
340 rev = c.rev()
340 rev = c.rev()
341 try:
341 try:
342 fnode = c.filenode('.hgtags')
342 fnode = c.filenode('.hgtags')
343 except revlog.LookupError:
343 except revlog.LookupError:
344 continue
344 continue
345 ret.append((rev, node, fnode))
345 ret.append((rev, node, fnode))
346 if fnode in last:
346 if fnode in last:
347 ret[last[fnode]] = None
347 ret[last[fnode]] = None
348 last[fnode] = len(ret) - 1
348 last[fnode] = len(ret) - 1
349 return [item for item in ret if item]
349 return [item for item in ret if item]
350
350
351 def tagslist(self):
351 def tagslist(self):
352 '''return a list of tags ordered by revision'''
352 '''return a list of tags ordered by revision'''
353 l = []
353 l = []
354 for t, n in self.tags().items():
354 for t, n in self.tags().items():
355 try:
355 try:
356 r = self.changelog.rev(n)
356 r = self.changelog.rev(n)
357 except:
357 except:
358 r = -2 # sort to the beginning of the list if unknown
358 r = -2 # sort to the beginning of the list if unknown
359 l.append((r, t, n))
359 l.append((r, t, n))
360 l.sort()
360 l.sort()
361 return [(t, n) for r, t, n in l]
361 return [(t, n) for r, t, n in l]
362
362
363 def nodetags(self, node):
363 def nodetags(self, node):
364 '''return the tags associated with a node'''
364 '''return the tags associated with a node'''
365 if not self.nodetagscache:
365 if not self.nodetagscache:
366 self.nodetagscache = {}
366 self.nodetagscache = {}
367 for t, n in self.tags().items():
367 for t, n in self.tags().items():
368 self.nodetagscache.setdefault(n, []).append(t)
368 self.nodetagscache.setdefault(n, []).append(t)
369 return self.nodetagscache.get(node, [])
369 return self.nodetagscache.get(node, [])
370
370
371 def _branchtags(self):
371 def _branchtags(self):
372 partial, last, lrev = self._readbranchcache()
372 partial, last, lrev = self._readbranchcache()
373
373
374 tiprev = self.changelog.count() - 1
374 tiprev = self.changelog.count() - 1
375 if lrev != tiprev:
375 if lrev != tiprev:
376 self._updatebranchcache(partial, lrev+1, tiprev+1)
376 self._updatebranchcache(partial, lrev+1, tiprev+1)
377 self._writebranchcache(partial, self.changelog.tip(), tiprev)
377 self._writebranchcache(partial, self.changelog.tip(), tiprev)
378
378
379 return partial
379 return partial
380
380
381 def branchtags(self):
381 def branchtags(self):
382 if self.branchcache is not None:
382 if self.branchcache is not None:
383 return self.branchcache
383 return self.branchcache
384
384
385 self.branchcache = {} # avoid recursion in changectx
385 self.branchcache = {} # avoid recursion in changectx
386 partial = self._branchtags()
386 partial = self._branchtags()
387
387
388 # the branch cache is stored on disk as UTF-8, but in the local
388 # the branch cache is stored on disk as UTF-8, but in the local
389 # charset internally
389 # charset internally
390 for k, v in partial.items():
390 for k, v in partial.items():
391 self.branchcache[util.tolocal(k)] = v
391 self.branchcache[util.tolocal(k)] = v
392 return self.branchcache
392 return self.branchcache
393
393
394 def _readbranchcache(self):
394 def _readbranchcache(self):
395 partial = {}
395 partial = {}
396 try:
396 try:
397 f = self.opener("branch.cache")
397 f = self.opener("branch.cache")
398 lines = f.read().split('\n')
398 lines = f.read().split('\n')
399 f.close()
399 f.close()
400 except (IOError, OSError):
400 except (IOError, OSError):
401 return {}, nullid, nullrev
401 return {}, nullid, nullrev
402
402
403 try:
403 try:
404 last, lrev = lines.pop(0).split(" ", 1)
404 last, lrev = lines.pop(0).split(" ", 1)
405 last, lrev = bin(last), int(lrev)
405 last, lrev = bin(last), int(lrev)
406 if not (lrev < self.changelog.count() and
406 if not (lrev < self.changelog.count() and
407 self.changelog.node(lrev) == last): # sanity check
407 self.changelog.node(lrev) == last): # sanity check
408 # invalidate the cache
408 # invalidate the cache
409 raise ValueError('Invalid branch cache: unknown tip')
409 raise ValueError('Invalid branch cache: unknown tip')
410 for l in lines:
410 for l in lines:
411 if not l: continue
411 if not l: continue
412 node, label = l.split(" ", 1)
412 node, label = l.split(" ", 1)
413 partial[label.strip()] = bin(node)
413 partial[label.strip()] = bin(node)
414 except (KeyboardInterrupt, util.SignalInterrupt):
414 except (KeyboardInterrupt, util.SignalInterrupt):
415 raise
415 raise
416 except Exception, inst:
416 except Exception, inst:
417 if self.ui.debugflag:
417 if self.ui.debugflag:
418 self.ui.warn(str(inst), '\n')
418 self.ui.warn(str(inst), '\n')
419 partial, last, lrev = {}, nullid, nullrev
419 partial, last, lrev = {}, nullid, nullrev
420 return partial, last, lrev
420 return partial, last, lrev
421
421
422 def _writebranchcache(self, branches, tip, tiprev):
422 def _writebranchcache(self, branches, tip, tiprev):
423 try:
423 try:
424 f = self.opener("branch.cache", "w", atomictemp=True)
424 f = self.opener("branch.cache", "w", atomictemp=True)
425 f.write("%s %s\n" % (hex(tip), tiprev))
425 f.write("%s %s\n" % (hex(tip), tiprev))
426 for label, node in branches.iteritems():
426 for label, node in branches.iteritems():
427 f.write("%s %s\n" % (hex(node), label))
427 f.write("%s %s\n" % (hex(node), label))
428 f.rename()
428 f.rename()
429 except (IOError, OSError):
429 except (IOError, OSError):
430 pass
430 pass
431
431
432 def _updatebranchcache(self, partial, start, end):
432 def _updatebranchcache(self, partial, start, end):
433 for r in xrange(start, end):
433 for r in xrange(start, end):
434 c = self.changectx(r)
434 c = self.changectx(r)
435 b = c.branch()
435 b = c.branch()
436 partial[b] = c.node()
436 partial[b] = c.node()
437
437
438 def lookup(self, key):
438 def lookup(self, key):
439 if key == '.':
439 if key == '.':
440 key = self.dirstate.parents()[0]
440 key, second = self.dirstate.parents()
441 if key == nullid:
441 if key == nullid:
442 raise repo.RepoError(_("no revision checked out"))
442 raise repo.RepoError(_("no revision checked out"))
443 if second != nullid:
444 self.ui.warn(_("warning: working directory has two parents, "
445 "tag '.' uses the first\n"))
443 elif key == 'null':
446 elif key == 'null':
444 return nullid
447 return nullid
445 n = self.changelog._match(key)
448 n = self.changelog._match(key)
446 if n:
449 if n:
447 return n
450 return n
448 if key in self.tags():
451 if key in self.tags():
449 return self.tags()[key]
452 return self.tags()[key]
450 if key in self.branchtags():
453 if key in self.branchtags():
451 return self.branchtags()[key]
454 return self.branchtags()[key]
452 n = self.changelog._partialmatch(key)
455 n = self.changelog._partialmatch(key)
453 if n:
456 if n:
454 return n
457 return n
455 raise repo.RepoError(_("unknown revision '%s'") % key)
458 raise repo.RepoError(_("unknown revision '%s'") % key)
456
459
457 def dev(self):
460 def dev(self):
458 return os.lstat(self.path).st_dev
461 return os.lstat(self.path).st_dev
459
462
460 def local(self):
463 def local(self):
461 return True
464 return True
462
465
463 def join(self, f):
466 def join(self, f):
464 return os.path.join(self.path, f)
467 return os.path.join(self.path, f)
465
468
466 def sjoin(self, f):
469 def sjoin(self, f):
467 f = self.encodefn(f)
470 f = self.encodefn(f)
468 return os.path.join(self.spath, f)
471 return os.path.join(self.spath, f)
469
472
470 def wjoin(self, f):
473 def wjoin(self, f):
471 return os.path.join(self.root, f)
474 return os.path.join(self.root, f)
472
475
473 def file(self, f):
476 def file(self, f):
474 if f[0] == '/':
477 if f[0] == '/':
475 f = f[1:]
478 f = f[1:]
476 return filelog.filelog(self.sopener, f)
479 return filelog.filelog(self.sopener, f)
477
480
478 def changectx(self, changeid=None):
481 def changectx(self, changeid=None):
479 return context.changectx(self, changeid)
482 return context.changectx(self, changeid)
480
483
481 def workingctx(self):
484 def workingctx(self):
482 return context.workingctx(self)
485 return context.workingctx(self)
483
486
484 def parents(self, changeid=None):
487 def parents(self, changeid=None):
485 '''
488 '''
486 get list of changectxs for parents of changeid or working directory
489 get list of changectxs for parents of changeid or working directory
487 '''
490 '''
488 if changeid is None:
491 if changeid is None:
489 pl = self.dirstate.parents()
492 pl = self.dirstate.parents()
490 else:
493 else:
491 n = self.changelog.lookup(changeid)
494 n = self.changelog.lookup(changeid)
492 pl = self.changelog.parents(n)
495 pl = self.changelog.parents(n)
493 if pl[1] == nullid:
496 if pl[1] == nullid:
494 return [self.changectx(pl[0])]
497 return [self.changectx(pl[0])]
495 return [self.changectx(pl[0]), self.changectx(pl[1])]
498 return [self.changectx(pl[0]), self.changectx(pl[1])]
496
499
497 def filectx(self, path, changeid=None, fileid=None):
500 def filectx(self, path, changeid=None, fileid=None):
498 """changeid can be a changeset revision, node, or tag.
501 """changeid can be a changeset revision, node, or tag.
499 fileid can be a file revision or node."""
502 fileid can be a file revision or node."""
500 return context.filectx(self, path, changeid, fileid)
503 return context.filectx(self, path, changeid, fileid)
501
504
502 def getcwd(self):
505 def getcwd(self):
503 return self.dirstate.getcwd()
506 return self.dirstate.getcwd()
504
507
505 def wfile(self, f, mode='r'):
508 def wfile(self, f, mode='r'):
506 return self.wopener(f, mode)
509 return self.wopener(f, mode)
507
510
508 def _link(self, f):
511 def _link(self, f):
509 return os.path.islink(self.wjoin(f))
512 return os.path.islink(self.wjoin(f))
510
513
511 def _filter(self, filter, filename, data):
514 def _filter(self, filter, filename, data):
512 if filter not in self.filterpats:
515 if filter not in self.filterpats:
513 l = []
516 l = []
514 for pat, cmd in self.ui.configitems(filter):
517 for pat, cmd in self.ui.configitems(filter):
515 mf = util.matcher(self.root, "", [pat], [], [])[1]
518 mf = util.matcher(self.root, "", [pat], [], [])[1]
516 l.append((mf, cmd))
519 l.append((mf, cmd))
517 self.filterpats[filter] = l
520 self.filterpats[filter] = l
518
521
519 for mf, cmd in self.filterpats[filter]:
522 for mf, cmd in self.filterpats[filter]:
520 if mf(filename):
523 if mf(filename):
521 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
524 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
522 data = util.filter(data, cmd)
525 data = util.filter(data, cmd)
523 break
526 break
524
527
525 return data
528 return data
526
529
527 def wread(self, filename):
530 def wread(self, filename):
528 if self._link(filename):
531 if self._link(filename):
529 data = os.readlink(self.wjoin(filename))
532 data = os.readlink(self.wjoin(filename))
530 else:
533 else:
531 data = self.wopener(filename, 'r').read()
534 data = self.wopener(filename, 'r').read()
532 return self._filter("encode", filename, data)
535 return self._filter("encode", filename, data)
533
536
534 def wwrite(self, filename, data, flags):
537 def wwrite(self, filename, data, flags):
535 data = self._filter("decode", filename, data)
538 data = self._filter("decode", filename, data)
536 if "l" in flags:
539 if "l" in flags:
537 f = self.wjoin(filename)
540 f = self.wjoin(filename)
538 try:
541 try:
539 os.unlink(f)
542 os.unlink(f)
540 except OSError:
543 except OSError:
541 pass
544 pass
542 d = os.path.dirname(f)
545 d = os.path.dirname(f)
543 if not os.path.exists(d):
546 if not os.path.exists(d):
544 os.makedirs(d)
547 os.makedirs(d)
545 os.symlink(data, f)
548 os.symlink(data, f)
546 else:
549 else:
547 try:
550 try:
548 if self._link(filename):
551 if self._link(filename):
549 os.unlink(self.wjoin(filename))
552 os.unlink(self.wjoin(filename))
550 except OSError:
553 except OSError:
551 pass
554 pass
552 self.wopener(filename, 'w').write(data)
555 self.wopener(filename, 'w').write(data)
553 util.set_exec(self.wjoin(filename), "x" in flags)
556 util.set_exec(self.wjoin(filename), "x" in flags)
554
557
555 def wwritedata(self, filename, data):
558 def wwritedata(self, filename, data):
556 return self._filter("decode", filename, data)
559 return self._filter("decode", filename, data)
557
560
558 def transaction(self):
561 def transaction(self):
559 tr = self.transhandle
562 tr = self.transhandle
560 if tr != None and tr.running():
563 if tr != None and tr.running():
561 return tr.nest()
564 return tr.nest()
562
565
563 # save dirstate for rollback
566 # save dirstate for rollback
564 try:
567 try:
565 ds = self.opener("dirstate").read()
568 ds = self.opener("dirstate").read()
566 except IOError:
569 except IOError:
567 ds = ""
570 ds = ""
568 self.opener("journal.dirstate", "w").write(ds)
571 self.opener("journal.dirstate", "w").write(ds)
569
572
570 renames = [(self.sjoin("journal"), self.sjoin("undo")),
573 renames = [(self.sjoin("journal"), self.sjoin("undo")),
571 (self.join("journal.dirstate"), self.join("undo.dirstate"))]
574 (self.join("journal.dirstate"), self.join("undo.dirstate"))]
572 tr = transaction.transaction(self.ui.warn, self.sopener,
575 tr = transaction.transaction(self.ui.warn, self.sopener,
573 self.sjoin("journal"),
576 self.sjoin("journal"),
574 aftertrans(renames))
577 aftertrans(renames))
575 self.transhandle = tr
578 self.transhandle = tr
576 return tr
579 return tr
577
580
578 def recover(self):
581 def recover(self):
579 l = self.lock()
582 l = self.lock()
580 if os.path.exists(self.sjoin("journal")):
583 if os.path.exists(self.sjoin("journal")):
581 self.ui.status(_("rolling back interrupted transaction\n"))
584 self.ui.status(_("rolling back interrupted transaction\n"))
582 transaction.rollback(self.sopener, self.sjoin("journal"))
585 transaction.rollback(self.sopener, self.sjoin("journal"))
583 self.reload()
586 self.reload()
584 return True
587 return True
585 else:
588 else:
586 self.ui.warn(_("no interrupted transaction available\n"))
589 self.ui.warn(_("no interrupted transaction available\n"))
587 return False
590 return False
588
591
589 def rollback(self, wlock=None, lock=None):
592 def rollback(self, wlock=None, lock=None):
590 if not wlock:
593 if not wlock:
591 wlock = self.wlock()
594 wlock = self.wlock()
592 if not lock:
595 if not lock:
593 lock = self.lock()
596 lock = self.lock()
594 if os.path.exists(self.sjoin("undo")):
597 if os.path.exists(self.sjoin("undo")):
595 self.ui.status(_("rolling back last transaction\n"))
598 self.ui.status(_("rolling back last transaction\n"))
596 transaction.rollback(self.sopener, self.sjoin("undo"))
599 transaction.rollback(self.sopener, self.sjoin("undo"))
597 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
600 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
598 self.reload()
601 self.reload()
599 self.wreload()
602 self.wreload()
600 else:
603 else:
601 self.ui.warn(_("no rollback information available\n"))
604 self.ui.warn(_("no rollback information available\n"))
602
605
603 def wreload(self):
606 def wreload(self):
604 self.dirstate.reload()
607 self.dirstate.reload()
605
608
606 def reload(self):
609 def reload(self):
607 self.changelog.load()
610 self.changelog.load()
608 self.manifest.load()
611 self.manifest.load()
609 self.tagscache = None
612 self.tagscache = None
610 self.nodetagscache = None
613 self.nodetagscache = None
611
614
612 def do_lock(self, lockname, wait, releasefn=None, acquirefn=None,
615 def do_lock(self, lockname, wait, releasefn=None, acquirefn=None,
613 desc=None):
616 desc=None):
614 try:
617 try:
615 l = lock.lock(lockname, 0, releasefn, desc=desc)
618 l = lock.lock(lockname, 0, releasefn, desc=desc)
616 except lock.LockHeld, inst:
619 except lock.LockHeld, inst:
617 if not wait:
620 if not wait:
618 raise
621 raise
619 self.ui.warn(_("waiting for lock on %s held by %r\n") %
622 self.ui.warn(_("waiting for lock on %s held by %r\n") %
620 (desc, inst.locker))
623 (desc, inst.locker))
621 # default to 600 seconds timeout
624 # default to 600 seconds timeout
622 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
625 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
623 releasefn, desc=desc)
626 releasefn, desc=desc)
624 if acquirefn:
627 if acquirefn:
625 acquirefn()
628 acquirefn()
626 return l
629 return l
627
630
628 def lock(self, wait=1):
631 def lock(self, wait=1):
629 return self.do_lock(self.sjoin("lock"), wait, acquirefn=self.reload,
632 return self.do_lock(self.sjoin("lock"), wait, acquirefn=self.reload,
630 desc=_('repository %s') % self.origroot)
633 desc=_('repository %s') % self.origroot)
631
634
632 def wlock(self, wait=1):
635 def wlock(self, wait=1):
633 return self.do_lock(self.join("wlock"), wait, self.dirstate.write,
636 return self.do_lock(self.join("wlock"), wait, self.dirstate.write,
634 self.wreload,
637 self.wreload,
635 desc=_('working directory of %s') % self.origroot)
638 desc=_('working directory of %s') % self.origroot)
636
639
637 def filecommit(self, fn, manifest1, manifest2, linkrev, transaction, changelist):
640 def filecommit(self, fn, manifest1, manifest2, linkrev, transaction, changelist):
638 """
641 """
639 commit an individual file as part of a larger transaction
642 commit an individual file as part of a larger transaction
640 """
643 """
641
644
642 t = self.wread(fn)
645 t = self.wread(fn)
643 fl = self.file(fn)
646 fl = self.file(fn)
644 fp1 = manifest1.get(fn, nullid)
647 fp1 = manifest1.get(fn, nullid)
645 fp2 = manifest2.get(fn, nullid)
648 fp2 = manifest2.get(fn, nullid)
646
649
647 meta = {}
650 meta = {}
648 cp = self.dirstate.copied(fn)
651 cp = self.dirstate.copied(fn)
649 if cp:
652 if cp:
650 # Mark the new revision of this file as a copy of another
653 # Mark the new revision of this file as a copy of another
651 # file. This copy data will effectively act as a parent
654 # file. This copy data will effectively act as a parent
652 # of this new revision. If this is a merge, the first
655 # of this new revision. If this is a merge, the first
653 # parent will be the nullid (meaning "look up the copy data")
656 # parent will be the nullid (meaning "look up the copy data")
654 # and the second one will be the other parent. For example:
657 # and the second one will be the other parent. For example:
655 #
658 #
656 # 0 --- 1 --- 3 rev1 changes file foo
659 # 0 --- 1 --- 3 rev1 changes file foo
657 # \ / rev2 renames foo to bar and changes it
660 # \ / rev2 renames foo to bar and changes it
658 # \- 2 -/ rev3 should have bar with all changes and
661 # \- 2 -/ rev3 should have bar with all changes and
659 # should record that bar descends from
662 # should record that bar descends from
660 # bar in rev2 and foo in rev1
663 # bar in rev2 and foo in rev1
661 #
664 #
662 # this allows this merge to succeed:
665 # this allows this merge to succeed:
663 #
666 #
664 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
667 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
665 # \ / merging rev3 and rev4 should use bar@rev2
668 # \ / merging rev3 and rev4 should use bar@rev2
666 # \- 2 --- 4 as the merge base
669 # \- 2 --- 4 as the merge base
667 #
670 #
668 meta["copy"] = cp
671 meta["copy"] = cp
669 if not manifest2: # not a branch merge
672 if not manifest2: # not a branch merge
670 meta["copyrev"] = hex(manifest1.get(cp, nullid))
673 meta["copyrev"] = hex(manifest1.get(cp, nullid))
671 fp2 = nullid
674 fp2 = nullid
672 elif fp2 != nullid: # copied on remote side
675 elif fp2 != nullid: # copied on remote side
673 meta["copyrev"] = hex(manifest1.get(cp, nullid))
676 meta["copyrev"] = hex(manifest1.get(cp, nullid))
674 elif fp1 != nullid: # copied on local side, reversed
677 elif fp1 != nullid: # copied on local side, reversed
675 meta["copyrev"] = hex(manifest2.get(cp))
678 meta["copyrev"] = hex(manifest2.get(cp))
676 fp2 = fp1
679 fp2 = fp1
677 else: # directory rename
680 else: # directory rename
678 meta["copyrev"] = hex(manifest1.get(cp, nullid))
681 meta["copyrev"] = hex(manifest1.get(cp, nullid))
679 self.ui.debug(_(" %s: copy %s:%s\n") %
682 self.ui.debug(_(" %s: copy %s:%s\n") %
680 (fn, cp, meta["copyrev"]))
683 (fn, cp, meta["copyrev"]))
681 fp1 = nullid
684 fp1 = nullid
682 elif fp2 != nullid:
685 elif fp2 != nullid:
683 # is one parent an ancestor of the other?
686 # is one parent an ancestor of the other?
684 fpa = fl.ancestor(fp1, fp2)
687 fpa = fl.ancestor(fp1, fp2)
685 if fpa == fp1:
688 if fpa == fp1:
686 fp1, fp2 = fp2, nullid
689 fp1, fp2 = fp2, nullid
687 elif fpa == fp2:
690 elif fpa == fp2:
688 fp2 = nullid
691 fp2 = nullid
689
692
690 # is the file unmodified from the parent? report existing entry
693 # is the file unmodified from the parent? report existing entry
691 if fp2 == nullid and not fl.cmp(fp1, t):
694 if fp2 == nullid and not fl.cmp(fp1, t):
692 return fp1
695 return fp1
693
696
694 changelist.append(fn)
697 changelist.append(fn)
695 return fl.add(t, meta, transaction, linkrev, fp1, fp2)
698 return fl.add(t, meta, transaction, linkrev, fp1, fp2)
696
699
697 def rawcommit(self, files, text, user, date, p1=None, p2=None, wlock=None, extra={}):
700 def rawcommit(self, files, text, user, date, p1=None, p2=None, wlock=None, extra={}):
698 if p1 is None:
701 if p1 is None:
699 p1, p2 = self.dirstate.parents()
702 p1, p2 = self.dirstate.parents()
700 return self.commit(files=files, text=text, user=user, date=date,
703 return self.commit(files=files, text=text, user=user, date=date,
701 p1=p1, p2=p2, wlock=wlock, extra=extra)
704 p1=p1, p2=p2, wlock=wlock, extra=extra)
702
705
703 def commit(self, files=None, text="", user=None, date=None,
706 def commit(self, files=None, text="", user=None, date=None,
704 match=util.always, force=False, lock=None, wlock=None,
707 match=util.always, force=False, lock=None, wlock=None,
705 force_editor=False, p1=None, p2=None, extra={}):
708 force_editor=False, p1=None, p2=None, extra={}):
706
709
707 commit = []
710 commit = []
708 remove = []
711 remove = []
709 changed = []
712 changed = []
710 use_dirstate = (p1 is None) # not rawcommit
713 use_dirstate = (p1 is None) # not rawcommit
711 extra = extra.copy()
714 extra = extra.copy()
712
715
713 if use_dirstate:
716 if use_dirstate:
714 if files:
717 if files:
715 for f in files:
718 for f in files:
716 s = self.dirstate.state(f)
719 s = self.dirstate.state(f)
717 if s in 'nmai':
720 if s in 'nmai':
718 commit.append(f)
721 commit.append(f)
719 elif s == 'r':
722 elif s == 'r':
720 remove.append(f)
723 remove.append(f)
721 else:
724 else:
722 self.ui.warn(_("%s not tracked!\n") % f)
725 self.ui.warn(_("%s not tracked!\n") % f)
723 else:
726 else:
724 changes = self.status(match=match)[:5]
727 changes = self.status(match=match)[:5]
725 modified, added, removed, deleted, unknown = changes
728 modified, added, removed, deleted, unknown = changes
726 commit = modified + added
729 commit = modified + added
727 remove = removed
730 remove = removed
728 else:
731 else:
729 commit = files
732 commit = files
730
733
731 if use_dirstate:
734 if use_dirstate:
732 p1, p2 = self.dirstate.parents()
735 p1, p2 = self.dirstate.parents()
733 update_dirstate = True
736 update_dirstate = True
734 else:
737 else:
735 p1, p2 = p1, p2 or nullid
738 p1, p2 = p1, p2 or nullid
736 update_dirstate = (self.dirstate.parents()[0] == p1)
739 update_dirstate = (self.dirstate.parents()[0] == p1)
737
740
738 c1 = self.changelog.read(p1)
741 c1 = self.changelog.read(p1)
739 c2 = self.changelog.read(p2)
742 c2 = self.changelog.read(p2)
740 m1 = self.manifest.read(c1[0]).copy()
743 m1 = self.manifest.read(c1[0]).copy()
741 m2 = self.manifest.read(c2[0])
744 m2 = self.manifest.read(c2[0])
742
745
743 if use_dirstate:
746 if use_dirstate:
744 branchname = self.workingctx().branch()
747 branchname = self.workingctx().branch()
745 try:
748 try:
746 branchname = branchname.decode('UTF-8').encode('UTF-8')
749 branchname = branchname.decode('UTF-8').encode('UTF-8')
747 except UnicodeDecodeError:
750 except UnicodeDecodeError:
748 raise util.Abort(_('branch name not in UTF-8!'))
751 raise util.Abort(_('branch name not in UTF-8!'))
749 else:
752 else:
750 branchname = ""
753 branchname = ""
751
754
752 if use_dirstate:
755 if use_dirstate:
753 oldname = c1[5].get("branch") # stored in UTF-8
756 oldname = c1[5].get("branch") # stored in UTF-8
754 if not commit and not remove and not force and p2 == nullid and \
757 if not commit and not remove and not force and p2 == nullid and \
755 branchname == oldname:
758 branchname == oldname:
756 self.ui.status(_("nothing changed\n"))
759 self.ui.status(_("nothing changed\n"))
757 return None
760 return None
758
761
759 xp1 = hex(p1)
762 xp1 = hex(p1)
760 if p2 == nullid: xp2 = ''
763 if p2 == nullid: xp2 = ''
761 else: xp2 = hex(p2)
764 else: xp2 = hex(p2)
762
765
763 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
766 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
764
767
765 if not wlock:
768 if not wlock:
766 wlock = self.wlock()
769 wlock = self.wlock()
767 if not lock:
770 if not lock:
768 lock = self.lock()
771 lock = self.lock()
769 tr = self.transaction()
772 tr = self.transaction()
770
773
771 # check in files
774 # check in files
772 new = {}
775 new = {}
773 linkrev = self.changelog.count()
776 linkrev = self.changelog.count()
774 commit.sort()
777 commit.sort()
775 is_exec = util.execfunc(self.root, m1.execf)
778 is_exec = util.execfunc(self.root, m1.execf)
776 is_link = util.linkfunc(self.root, m1.linkf)
779 is_link = util.linkfunc(self.root, m1.linkf)
777 for f in commit:
780 for f in commit:
778 self.ui.note(f + "\n")
781 self.ui.note(f + "\n")
779 try:
782 try:
780 new[f] = self.filecommit(f, m1, m2, linkrev, tr, changed)
783 new[f] = self.filecommit(f, m1, m2, linkrev, tr, changed)
781 m1.set(f, is_exec(f), is_link(f))
784 m1.set(f, is_exec(f), is_link(f))
782 except (OSError, IOError):
785 except (OSError, IOError):
783 if use_dirstate:
786 if use_dirstate:
784 self.ui.warn(_("trouble committing %s!\n") % f)
787 self.ui.warn(_("trouble committing %s!\n") % f)
785 raise
788 raise
786 else:
789 else:
787 remove.append(f)
790 remove.append(f)
788
791
789 # update manifest
792 # update manifest
790 m1.update(new)
793 m1.update(new)
791 remove.sort()
794 remove.sort()
792 removed = []
795 removed = []
793
796
794 for f in remove:
797 for f in remove:
795 if f in m1:
798 if f in m1:
796 del m1[f]
799 del m1[f]
797 removed.append(f)
800 removed.append(f)
798 mn = self.manifest.add(m1, tr, linkrev, c1[0], c2[0], (new, removed))
801 mn = self.manifest.add(m1, tr, linkrev, c1[0], c2[0], (new, removed))
799
802
800 # add changeset
803 # add changeset
801 new = new.keys()
804 new = new.keys()
802 new.sort()
805 new.sort()
803
806
804 user = user or self.ui.username()
807 user = user or self.ui.username()
805 if not text or force_editor:
808 if not text or force_editor:
806 edittext = []
809 edittext = []
807 if text:
810 if text:
808 edittext.append(text)
811 edittext.append(text)
809 edittext.append("")
812 edittext.append("")
810 edittext.append("HG: user: %s" % user)
813 edittext.append("HG: user: %s" % user)
811 if p2 != nullid:
814 if p2 != nullid:
812 edittext.append("HG: branch merge")
815 edittext.append("HG: branch merge")
813 if branchname:
816 if branchname:
814 edittext.append("HG: branch %s" % util.tolocal(branchname))
817 edittext.append("HG: branch %s" % util.tolocal(branchname))
815 edittext.extend(["HG: changed %s" % f for f in changed])
818 edittext.extend(["HG: changed %s" % f for f in changed])
816 edittext.extend(["HG: removed %s" % f for f in removed])
819 edittext.extend(["HG: removed %s" % f for f in removed])
817 if not changed and not remove:
820 if not changed and not remove:
818 edittext.append("HG: no files changed")
821 edittext.append("HG: no files changed")
819 edittext.append("")
822 edittext.append("")
820 # run editor in the repository root
823 # run editor in the repository root
821 olddir = os.getcwd()
824 olddir = os.getcwd()
822 os.chdir(self.root)
825 os.chdir(self.root)
823 text = self.ui.edit("\n".join(edittext), user)
826 text = self.ui.edit("\n".join(edittext), user)
824 os.chdir(olddir)
827 os.chdir(olddir)
825
828
826 lines = [line.rstrip() for line in text.rstrip().splitlines()]
829 lines = [line.rstrip() for line in text.rstrip().splitlines()]
827 while lines and not lines[0]:
830 while lines and not lines[0]:
828 del lines[0]
831 del lines[0]
829 if not lines:
832 if not lines:
830 return None
833 return None
831 text = '\n'.join(lines)
834 text = '\n'.join(lines)
832 if branchname:
835 if branchname:
833 extra["branch"] = branchname
836 extra["branch"] = branchname
834 n = self.changelog.add(mn, changed + removed, text, tr, p1, p2,
837 n = self.changelog.add(mn, changed + removed, text, tr, p1, p2,
835 user, date, extra)
838 user, date, extra)
836 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
839 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
837 parent2=xp2)
840 parent2=xp2)
838 tr.close()
841 tr.close()
839
842
840 if self.branchcache and "branch" in extra:
843 if self.branchcache and "branch" in extra:
841 self.branchcache[util.tolocal(extra["branch"])] = n
844 self.branchcache[util.tolocal(extra["branch"])] = n
842
845
843 if use_dirstate or update_dirstate:
846 if use_dirstate or update_dirstate:
844 self.dirstate.setparents(n)
847 self.dirstate.setparents(n)
845 if use_dirstate:
848 if use_dirstate:
846 self.dirstate.update(new, "n")
849 self.dirstate.update(new, "n")
847 self.dirstate.forget(removed)
850 self.dirstate.forget(removed)
848
851
849 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
852 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
850 return n
853 return n
851
854
852 def walk(self, node=None, files=[], match=util.always, badmatch=None):
855 def walk(self, node=None, files=[], match=util.always, badmatch=None):
853 '''
856 '''
854 walk recursively through the directory tree or a given
857 walk recursively through the directory tree or a given
855 changeset, finding all files matched by the match
858 changeset, finding all files matched by the match
856 function
859 function
857
860
858 results are yielded in a tuple (src, filename), where src
861 results are yielded in a tuple (src, filename), where src
859 is one of:
862 is one of:
860 'f' the file was found in the directory tree
863 'f' the file was found in the directory tree
861 'm' the file was only in the dirstate and not in the tree
864 'm' the file was only in the dirstate and not in the tree
862 'b' file was not found and matched badmatch
865 'b' file was not found and matched badmatch
863 '''
866 '''
864
867
865 if node:
868 if node:
866 fdict = dict.fromkeys(files)
869 fdict = dict.fromkeys(files)
867 # for dirstate.walk, files=['.'] means "walk the whole tree".
870 # for dirstate.walk, files=['.'] means "walk the whole tree".
868 # follow that here, too
871 # follow that here, too
869 fdict.pop('.', None)
872 fdict.pop('.', None)
870 mdict = self.manifest.read(self.changelog.read(node)[0])
873 mdict = self.manifest.read(self.changelog.read(node)[0])
871 mfiles = mdict.keys()
874 mfiles = mdict.keys()
872 mfiles.sort()
875 mfiles.sort()
873 for fn in mfiles:
876 for fn in mfiles:
874 for ffn in fdict:
877 for ffn in fdict:
875 # match if the file is the exact name or a directory
878 # match if the file is the exact name or a directory
876 if ffn == fn or fn.startswith("%s/" % ffn):
879 if ffn == fn or fn.startswith("%s/" % ffn):
877 del fdict[ffn]
880 del fdict[ffn]
878 break
881 break
879 if match(fn):
882 if match(fn):
880 yield 'm', fn
883 yield 'm', fn
881 ffiles = fdict.keys()
884 ffiles = fdict.keys()
882 ffiles.sort()
885 ffiles.sort()
883 for fn in ffiles:
886 for fn in ffiles:
884 if badmatch and badmatch(fn):
887 if badmatch and badmatch(fn):
885 if match(fn):
888 if match(fn):
886 yield 'b', fn
889 yield 'b', fn
887 else:
890 else:
888 self.ui.warn(_('%s: No such file in rev %s\n') % (
891 self.ui.warn(_('%s: No such file in rev %s\n') % (
889 util.pathto(self.root, self.getcwd(), fn), short(node)))
892 util.pathto(self.root, self.getcwd(), fn), short(node)))
890 else:
893 else:
891 for src, fn in self.dirstate.walk(files, match, badmatch=badmatch):
894 for src, fn in self.dirstate.walk(files, match, badmatch=badmatch):
892 yield src, fn
895 yield src, fn
893
896
894 def status(self, node1=None, node2=None, files=[], match=util.always,
897 def status(self, node1=None, node2=None, files=[], match=util.always,
895 wlock=None, list_ignored=False, list_clean=False):
898 wlock=None, list_ignored=False, list_clean=False):
896 """return status of files between two nodes or node and working directory
899 """return status of files between two nodes or node and working directory
897
900
898 If node1 is None, use the first dirstate parent instead.
901 If node1 is None, use the first dirstate parent instead.
899 If node2 is None, compare node1 with working directory.
902 If node2 is None, compare node1 with working directory.
900 """
903 """
901
904
902 def fcmp(fn, getnode):
905 def fcmp(fn, getnode):
903 t1 = self.wread(fn)
906 t1 = self.wread(fn)
904 return self.file(fn).cmp(getnode(fn), t1)
907 return self.file(fn).cmp(getnode(fn), t1)
905
908
906 def mfmatches(node):
909 def mfmatches(node):
907 change = self.changelog.read(node)
910 change = self.changelog.read(node)
908 mf = self.manifest.read(change[0]).copy()
911 mf = self.manifest.read(change[0]).copy()
909 for fn in mf.keys():
912 for fn in mf.keys():
910 if not match(fn):
913 if not match(fn):
911 del mf[fn]
914 del mf[fn]
912 return mf
915 return mf
913
916
914 modified, added, removed, deleted, unknown = [], [], [], [], []
917 modified, added, removed, deleted, unknown = [], [], [], [], []
915 ignored, clean = [], []
918 ignored, clean = [], []
916
919
917 compareworking = False
920 compareworking = False
918 if not node1 or (not node2 and node1 == self.dirstate.parents()[0]):
921 if not node1 or (not node2 and node1 == self.dirstate.parents()[0]):
919 compareworking = True
922 compareworking = True
920
923
921 if not compareworking:
924 if not compareworking:
922 # read the manifest from node1 before the manifest from node2,
925 # read the manifest from node1 before the manifest from node2,
923 # so that we'll hit the manifest cache if we're going through
926 # so that we'll hit the manifest cache if we're going through
924 # all the revisions in parent->child order.
927 # all the revisions in parent->child order.
925 mf1 = mfmatches(node1)
928 mf1 = mfmatches(node1)
926
929
927 mywlock = False
930 mywlock = False
928
931
929 # are we comparing the working directory?
932 # are we comparing the working directory?
930 if not node2:
933 if not node2:
931 (lookup, modified, added, removed, deleted, unknown,
934 (lookup, modified, added, removed, deleted, unknown,
932 ignored, clean) = self.dirstate.status(files, match,
935 ignored, clean) = self.dirstate.status(files, match,
933 list_ignored, list_clean)
936 list_ignored, list_clean)
934
937
935 # are we comparing working dir against its parent?
938 # are we comparing working dir against its parent?
936 if compareworking:
939 if compareworking:
937 if lookup:
940 if lookup:
938 # do a full compare of any files that might have changed
941 # do a full compare of any files that might have changed
939 mnode = self.changelog.read(self.dirstate.parents()[0])[0]
942 mnode = self.changelog.read(self.dirstate.parents()[0])[0]
940 getnode = lambda fn: (self.manifest.find(mnode, fn)[0] or
943 getnode = lambda fn: (self.manifest.find(mnode, fn)[0] or
941 nullid)
944 nullid)
942 for f in lookup:
945 for f in lookup:
943 if fcmp(f, getnode):
946 if fcmp(f, getnode):
944 modified.append(f)
947 modified.append(f)
945 else:
948 else:
946 clean.append(f)
949 clean.append(f)
947 if not wlock and not mywlock:
950 if not wlock and not mywlock:
948 mywlock = True
951 mywlock = True
949 try:
952 try:
950 wlock = self.wlock(wait=0)
953 wlock = self.wlock(wait=0)
951 except lock.LockException:
954 except lock.LockException:
952 pass
955 pass
953 if wlock:
956 if wlock:
954 self.dirstate.update([f], "n")
957 self.dirstate.update([f], "n")
955 else:
958 else:
956 # we are comparing working dir against non-parent
959 # we are comparing working dir against non-parent
957 # generate a pseudo-manifest for the working dir
960 # generate a pseudo-manifest for the working dir
958 # XXX: create it in dirstate.py ?
961 # XXX: create it in dirstate.py ?
959 mf2 = mfmatches(self.dirstate.parents()[0])
962 mf2 = mfmatches(self.dirstate.parents()[0])
960 is_exec = util.execfunc(self.root, mf2.execf)
963 is_exec = util.execfunc(self.root, mf2.execf)
961 is_link = util.linkfunc(self.root, mf2.linkf)
964 is_link = util.linkfunc(self.root, mf2.linkf)
962 for f in lookup + modified + added:
965 for f in lookup + modified + added:
963 mf2[f] = ""
966 mf2[f] = ""
964 mf2.set(f, is_exec(f), is_link(f))
967 mf2.set(f, is_exec(f), is_link(f))
965 for f in removed:
968 for f in removed:
966 if f in mf2:
969 if f in mf2:
967 del mf2[f]
970 del mf2[f]
968
971
969 if mywlock and wlock:
972 if mywlock and wlock:
970 wlock.release()
973 wlock.release()
971 else:
974 else:
972 # we are comparing two revisions
975 # we are comparing two revisions
973 mf2 = mfmatches(node2)
976 mf2 = mfmatches(node2)
974
977
975 if not compareworking:
978 if not compareworking:
976 # flush lists from dirstate before comparing manifests
979 # flush lists from dirstate before comparing manifests
977 modified, added, clean = [], [], []
980 modified, added, clean = [], [], []
978
981
979 # make sure to sort the files so we talk to the disk in a
982 # make sure to sort the files so we talk to the disk in a
980 # reasonable order
983 # reasonable order
981 mf2keys = mf2.keys()
984 mf2keys = mf2.keys()
982 mf2keys.sort()
985 mf2keys.sort()
983 getnode = lambda fn: mf1.get(fn, nullid)
986 getnode = lambda fn: mf1.get(fn, nullid)
984 for fn in mf2keys:
987 for fn in mf2keys:
985 if mf1.has_key(fn):
988 if mf1.has_key(fn):
986 if mf1.flags(fn) != mf2.flags(fn) or \
989 if mf1.flags(fn) != mf2.flags(fn) or \
987 (mf1[fn] != mf2[fn] and (mf2[fn] != "" or
990 (mf1[fn] != mf2[fn] and (mf2[fn] != "" or
988 fcmp(fn, getnode))):
991 fcmp(fn, getnode))):
989 modified.append(fn)
992 modified.append(fn)
990 elif list_clean:
993 elif list_clean:
991 clean.append(fn)
994 clean.append(fn)
992 del mf1[fn]
995 del mf1[fn]
993 else:
996 else:
994 added.append(fn)
997 added.append(fn)
995
998
996 removed = mf1.keys()
999 removed = mf1.keys()
997
1000
998 # sort and return results:
1001 # sort and return results:
999 for l in modified, added, removed, deleted, unknown, ignored, clean:
1002 for l in modified, added, removed, deleted, unknown, ignored, clean:
1000 l.sort()
1003 l.sort()
1001 return (modified, added, removed, deleted, unknown, ignored, clean)
1004 return (modified, added, removed, deleted, unknown, ignored, clean)
1002
1005
1003 def add(self, list, wlock=None):
1006 def add(self, list, wlock=None):
1004 if not wlock:
1007 if not wlock:
1005 wlock = self.wlock()
1008 wlock = self.wlock()
1006 for f in list:
1009 for f in list:
1007 p = self.wjoin(f)
1010 p = self.wjoin(f)
1008 islink = os.path.islink(p)
1011 islink = os.path.islink(p)
1009 size = os.lstat(p).st_size
1012 size = os.lstat(p).st_size
1010 if size > 10000000:
1013 if size > 10000000:
1011 self.ui.warn(_("%s: files over 10MB may cause memory and"
1014 self.ui.warn(_("%s: files over 10MB may cause memory and"
1012 " performance problems\n"
1015 " performance problems\n"
1013 "(use 'hg revert %s' to unadd the file)\n")
1016 "(use 'hg revert %s' to unadd the file)\n")
1014 % (f, f))
1017 % (f, f))
1015 if not islink and not os.path.exists(p):
1018 if not islink and not os.path.exists(p):
1016 self.ui.warn(_("%s does not exist!\n") % f)
1019 self.ui.warn(_("%s does not exist!\n") % f)
1017 elif not islink and not os.path.isfile(p):
1020 elif not islink and not os.path.isfile(p):
1018 self.ui.warn(_("%s not added: only files and symlinks "
1021 self.ui.warn(_("%s not added: only files and symlinks "
1019 "supported currently\n") % f)
1022 "supported currently\n") % f)
1020 elif self.dirstate.state(f) in 'an':
1023 elif self.dirstate.state(f) in 'an':
1021 self.ui.warn(_("%s already tracked!\n") % f)
1024 self.ui.warn(_("%s already tracked!\n") % f)
1022 else:
1025 else:
1023 self.dirstate.update([f], "a")
1026 self.dirstate.update([f], "a")
1024
1027
1025 def forget(self, list, wlock=None):
1028 def forget(self, list, wlock=None):
1026 if not wlock:
1029 if not wlock:
1027 wlock = self.wlock()
1030 wlock = self.wlock()
1028 for f in list:
1031 for f in list:
1029 if self.dirstate.state(f) not in 'ai':
1032 if self.dirstate.state(f) not in 'ai':
1030 self.ui.warn(_("%s not added!\n") % f)
1033 self.ui.warn(_("%s not added!\n") % f)
1031 else:
1034 else:
1032 self.dirstate.forget([f])
1035 self.dirstate.forget([f])
1033
1036
1034 def remove(self, list, unlink=False, wlock=None):
1037 def remove(self, list, unlink=False, wlock=None):
1035 if unlink:
1038 if unlink:
1036 for f in list:
1039 for f in list:
1037 try:
1040 try:
1038 util.unlink(self.wjoin(f))
1041 util.unlink(self.wjoin(f))
1039 except OSError, inst:
1042 except OSError, inst:
1040 if inst.errno != errno.ENOENT:
1043 if inst.errno != errno.ENOENT:
1041 raise
1044 raise
1042 if not wlock:
1045 if not wlock:
1043 wlock = self.wlock()
1046 wlock = self.wlock()
1044 for f in list:
1047 for f in list:
1045 if unlink and os.path.exists(self.wjoin(f)):
1048 if unlink and os.path.exists(self.wjoin(f)):
1046 self.ui.warn(_("%s still exists!\n") % f)
1049 self.ui.warn(_("%s still exists!\n") % f)
1047 elif self.dirstate.state(f) == 'a':
1050 elif self.dirstate.state(f) == 'a':
1048 self.dirstate.forget([f])
1051 self.dirstate.forget([f])
1049 elif f not in self.dirstate:
1052 elif f not in self.dirstate:
1050 self.ui.warn(_("%s not tracked!\n") % f)
1053 self.ui.warn(_("%s not tracked!\n") % f)
1051 else:
1054 else:
1052 self.dirstate.update([f], "r")
1055 self.dirstate.update([f], "r")
1053
1056
1054 def undelete(self, list, wlock=None):
1057 def undelete(self, list, wlock=None):
1055 p = self.dirstate.parents()[0]
1058 p = self.dirstate.parents()[0]
1056 mn = self.changelog.read(p)[0]
1059 mn = self.changelog.read(p)[0]
1057 m = self.manifest.read(mn)
1060 m = self.manifest.read(mn)
1058 if not wlock:
1061 if not wlock:
1059 wlock = self.wlock()
1062 wlock = self.wlock()
1060 for f in list:
1063 for f in list:
1061 if self.dirstate.state(f) not in "r":
1064 if self.dirstate.state(f) not in "r":
1062 self.ui.warn("%s not removed!\n" % f)
1065 self.ui.warn("%s not removed!\n" % f)
1063 else:
1066 else:
1064 t = self.file(f).read(m[f])
1067 t = self.file(f).read(m[f])
1065 self.wwrite(f, t, m.flags(f))
1068 self.wwrite(f, t, m.flags(f))
1066 self.dirstate.update([f], "n")
1069 self.dirstate.update([f], "n")
1067
1070
def copy(self, source, dest, wlock=None):
    """Record *dest* as a copy of *source* in the dirstate.

    Warns and does nothing if *dest* is missing from the working
    directory or is neither a regular file nor a symlink.
    """
    target = self.wjoin(dest)
    if not (os.path.exists(target) or os.path.islink(target)):
        self.ui.warn(_("%s does not exist!\n") % dest)
        return
    if not (os.path.isfile(target) or os.path.islink(target)):
        self.ui.warn(_("copy failed: %s is not a file or a "
                       "symbolic link\n") % dest)
        return
    if not wlock:
        wlock = self.wlock()
    # newly-seen files must be added before the copy can be recorded
    if self.dirstate.state(dest) == '?':
        self.dirstate.update([dest], "a")
    self.dirstate.copy(source, dest)
1081
1084
def heads(self, start=None):
    """Return the changelog heads, ordered by descending revision.

    *start* is forwarded to changelog.heads() (presumably restricting
    the search to descendants of that node -- confirm with changelog).
    """
    found = self.changelog.heads(start)
    # decorate with the negated revision so a plain sort yields
    # newest-first order
    decorated = []
    for h in found:
        decorated.append((-self.changelog.rev(h), h))
    decorated.sort()
    return [node for (_negrev, node) in decorated]
1088
1091
def branches(self, nodes):
    """Return branch descriptions for the given nodes.

    Each entry is a 4-tuple (tip, root, p1, p2): starting from *tip*,
    first parents are followed until a merge or a root changeset is
    reached; that changeset's parents terminate the linear segment.
    Defaults to the changelog tip when *nodes* is empty.
    """
    if not nodes:
        nodes = [self.changelog.tip()]
    out = []
    for tip in nodes:
        cur = tip
        while True:
            p1, p2 = self.changelog.parents(cur)
            # stop at a merge (two real parents) or at a root
            if p2 != nullid or p1 == nullid:
                out.append((tip, cur, p1, p2))
                break
            cur = p1
    return out
1102
1105
def between(self, pairs):
    """Sample the first-parent chain between each (top, bottom) pair.

    For every pair, walk first parents from *top* toward *bottom* and
    collect the nodes at exponentially growing distances 1, 2, 4, ...
    from *top*.  Used by the discovery protocol to binary-search a
    branch range with few round trips.
    """
    results = []
    for top, bottom in pairs:
        chain = []
        node, step, target = top, 0, 1
        while node != bottom:
            parent = self.changelog.parents(node)[0]
            if step == target:
                chain.append(node)
                target *= 2
            node = parent
            step += 1
        results.append(chain)
    return results
1121
1124
def findincoming(self, remote, base=None, heads=None, force=False):
    """Return list of roots of the subsets of missing nodes from remote

    If base dict is specified, assume that these nodes and their parents
    exist on the remote side and that no child of a node of base exists
    in both remote and self.
    Furthermore base will be updated to include the nodes that exists
    in self and remote but no children exists in self and remote.
    If a list of heads is specified, return only nodes which are heads
    or ancestors of these heads.

    All the ancestors of base are in self and in remote.
    All the descendants of the list returned are missing in self.
    (and so we know that the rest of the nodes are missing in remote, see
    outgoing)
    """
    # nodemap maps node -> local rev; membership answers "do we have
    # this changeset locally?"
    m = self.changelog.nodemap
    search = []      # (head, root) branch ranges queued for binary search
    fetch = {}       # used as a set: roots of the missing subsets
    seen = {}        # used as a set: branch heads already examined
    seenbranch = {}  # used as a set: branch tuples already scheduled
    if base == None:
        base = {}

    if not heads:
        heads = remote.heads()

    if self.changelog.tip() == nullid:
        # local repo is empty: everything the remote has is missing
        base[nullid] = 1
        if heads != [nullid]:
            return [nullid]
        return []

    # assume we're closer to the tip than the root
    # and start by examining the heads
    self.ui.status(_("searching for changes\n"))

    unknown = []
    for h in heads:
        if h not in m:
            unknown.append(h)
        else:
            base[h] = 1

    if not unknown:
        # every remote head is already known locally: nothing incoming
        return []

    req = dict.fromkeys(unknown)  # nodes already requested from remote
    reqcnt = 0                    # round-trip counter, for debug output

    # search through remote branches
    # a 'branch' here is a linear segment of history, with four parts:
    # head, root, first parent, second parent
    # (a branch always has two parents (or none) by definition)
    unknown = remote.branches(unknown)
    while unknown:
        r = []  # parents to ask the remote about in the next batch
        while unknown:
            n = unknown.pop(0)
            if n[0] in seen:
                continue

            self.ui.debug(_("examining %s:%s\n")
                          % (short(n[0]), short(n[1])))
            if n[0] == nullid: # found the end of the branch
                pass
            elif n in seenbranch:
                self.ui.debug(_("branch already found\n"))
                continue
            elif n[1] and n[1] in m: # do we know the base?
                self.ui.debug(_("found incomplete branch %s:%s\n")
                              % (short(n[0]), short(n[1])))
                search.append(n) # schedule branch range for scanning
                seenbranch[n] = 1
            else:
                if n[1] not in seen and n[1] not in fetch:
                    if n[2] in m and n[3] in m:
                        self.ui.debug(_("found new changeset %s\n") %
                                      short(n[1]))
                        fetch[n[1]] = 1 # earliest unknown
                    for p in n[2:4]:
                        if p in m:
                            base[p] = 1 # latest known

                for p in n[2:4]:
                    if p not in req and p not in m:
                        r.append(p)
                        req[p] = 1
            seen[n[0]] = 1

        if r:
            # batch parent queries ten at a time to bound request size
            reqcnt += 1
            self.ui.debug(_("request %d: %s\n") %
                          (reqcnt, " ".join(map(short, r))))
            for p in xrange(0, len(r), 10):
                for b in remote.branches(r[p:p+10]):
                    self.ui.debug(_("received %s:%s\n") %
                                  (short(b[0]), short(b[1])))
                    unknown.append(b)

    # do binary search on the branches we found
    while search:
        n = search.pop(0)
        reqcnt += 1
        l = remote.between([(n[0], n[1])])[0]
        l.append(n[1])
        p = n[0]
        f = 1  # distance between consecutive sample points, doubles each step
        for i in l:
            self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
            if i in m:
                if f <= 2:
                    # gap of at most one node: p is the earliest unknown
                    self.ui.debug(_("found new branch changeset %s\n") %
                                  short(p))
                    fetch[p] = 1
                    base[i] = 1
                else:
                    # still a wide gap: narrow the range and retry
                    self.ui.debug(_("narrowed branch search to %s:%s\n")
                                  % (short(p), short(i)))
                    search.append((p, i))
                break
            p, f = i, f * 2

    # sanity check our fetch list
    for f in fetch.keys():
        if f in m:
            # NOTE(review): f[:4] truncates the binary node *before*
            # short() -- looks suspicious (short(f) expected?); confirm.
            raise repo.RepoError(_("already have changeset ") + short(f[:4]))

    if base.keys() == [nullid]:
        # only the null revision is common: the repos share no history
        if force:
            self.ui.warn(_("warning: repository is unrelated\n"))
        else:
            raise util.Abort(_("repository is unrelated"))

    self.ui.debug(_("found new changesets starting at ") +
                  " ".join([short(f) for f in fetch]) + "\n")

    self.ui.debug(_("%d total queries\n") % reqcnt)

    return fetch.keys()
1262
1265
def findoutgoing(self, remote, base=None, heads=None, force=False):
    """Return list of nodes that are roots of subsets not in remote

    If base dict is specified, assume that these nodes and their parents
    exist on the remote side.
    If a list of heads is specified, return only nodes which are heads
    or ancestors of these heads, and return a second element which
    contains all remote heads which get new children.
    """
    if base == None:
        base = {}
        # discover the common nodes; findincoming fills *base* in place
        self.findincoming(remote, base, heads, force=force)

    self.ui.debug(_("common changesets up to ")
                  + " ".join(map(short, base.keys())) + "\n")

    # start from every local node; common ancestry is pruned away below
    remain = dict.fromkeys(self.changelog.nodemap)

    # prune everything remote has from the tree
    del remain[nullid]
    remove = base.keys()  # worklist of known-common nodes
    while remove:
        n = remove.pop(0)
        if n in remain:
            del remain[n]
            for p in self.changelog.parents(n):
                remove.append(p)

    # find every node whose parents have been pruned
    subset = []
    # find every remote head that will get new children
    updated_heads = {}
    for n in remain:
        p1, p2 = self.changelog.parents(n)
        if p1 not in remain and p2 not in remain:
            subset.append(n)
        if heads:
            if p1 in heads:
                updated_heads[p1] = True
            if p2 in heads:
                updated_heads[p2] = True

    # this is the set of all roots we have to push
    if heads:
        return subset, updated_heads.keys()
    else:
        return subset
1310
1313
def pull(self, remote, heads=None, force=False, lock=None):
    """Pull missing changesets from *remote* into this repository.

    When *heads* is given, only changesets that are ancestors of those
    heads are requested (requires the remote's changegroupsubset
    capability).  Acquires the repo lock unless the caller passed one.
    Returns the result of addchangegroup, or 0 if nothing was found.
    """
    owns_lock = False
    if not lock:
        lock = self.lock()
        owns_lock = True

    try:
        fetch = self.findincoming(remote, force=force)
        if fetch == [nullid]:
            self.ui.status(_("requesting all changes\n"))

        if not fetch:
            self.ui.status(_("no changes found\n"))
            return 0

        if heads is None:
            cg = remote.changegroup(fetch, 'pull')
        elif 'changegroupsubset' not in remote.capabilities:
            raise util.Abort(_("Partial pull cannot be done because other repository doesn't support changegroupsubset."))
        else:
            cg = remote.changegroupsubset(fetch, heads, 'pull')
        return self.addchangegroup(cg, 'pull', remote.url())
    finally:
        # only release a lock we took ourselves
        if owns_lock:
            lock.release()
1336
1339
def push(self, remote, force=False, revs=None):
    """Push outgoing changesets to *remote*, choosing the transport.

    There are two ways to push to a remote repo: addchangegroup assumes
    the local user can lock the remote repo (local filesystem, old ssh
    servers), while unbundle assumes the local user cannot lock it (new
    ssh servers, http servers).
    """
    if not remote.capable('unbundle'):
        return self.push_addchangegroup(remote, force, revs)
    return self.push_unbundle(remote, force, revs)
1349
1352
def prepush(self, remote, force, revs):
    """Analyse what a push to *remote* would transfer.

    Returns a two-tuple: (changegroup, remote_heads) when there is
    something to push, or (None, 1) when there is nothing to push or
    when the push would create new remote heads and *force* is not set
    (callers use the second element as an exit status).
    """
    base = {}
    remote_heads = remote.heads()
    # inc is truthy when the remote has changesets we lack
    inc = self.findincoming(remote, base, remote_heads, force=force)

    update, updated_heads = self.findoutgoing(remote, base, remote_heads)
    if revs is not None:
        msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
    else:
        bases, heads = update, self.changelog.heads()

    if not bases:
        self.ui.status(_("no changes found\n"))
        return None, 1
    elif not force:
        # check if we're creating new remote heads
        # to be a remote head after push, node must be either
        # - unknown locally
        # - a local outgoing head descended from update
        # - a remote head that's known locally and not
        #   ancestral to an outgoing head

        warn = 0

        if remote_heads == [nullid]:
            # pushing into an empty repo can never add heads
            warn = 0
        elif not revs and len(heads) > len(remote_heads):
            warn = 1
        else:
            # simulate the post-push remote head set and compare sizes
            newheads = list(heads)
            for r in remote_heads:
                if r in self.changelog.nodemap:
                    desc = self.changelog.heads(r, heads)
                    l = [h for h in heads if h in desc]
                    if not l:
                        newheads.append(r)
                else:
                    newheads.append(r)
            if len(newheads) > len(remote_heads):
                warn = 1

        if warn:
            self.ui.warn(_("abort: push creates new remote branches!\n"))
            self.ui.status(_("(did you forget to merge?"
                             " use push -f to force)\n"))
            return None, 1
    elif inc:
        # NOTE(review): because of the elif chain this warning is only
        # reached when force is set -- confirm that is intended.
        self.ui.warn(_("note: unsynced remote changes!\n"))


    if revs is None:
        cg = self.changegroup(update, 'push')
    else:
        cg = self.changegroupsubset(update, revs, 'push')
    return cg, remote_heads
1405
1408
def push_addchangegroup(self, remote, force, revs):
    """Push by locking the remote repo and adding a changegroup directly.

    Used for remotes that allow the local user to take their lock
    (local filesystem, old ssh servers).  Returns addchangegroup's
    result, or prepush's status code when there is nothing to push.
    """
    lock = remote.lock()
    try:
        ret = self.prepush(remote, force, revs)
        if ret[0] is not None:
            cg, remote_heads = ret
            return remote.addchangegroup(cg, 'push', self.url())
        return ret[1]
    finally:
        # bug fix: the remote lock was acquired but never released,
        # leaking it on every path; mirror the release pattern in pull()
        lock.release()
1414
1417
def push_unbundle(self, remote, force, revs):
    """Push via the unbundle wire command (no remote lock needed).

    The local repo finds the remote's heads and the revs it must push.
    Once the revs are transferred, the server aborts if its heads have
    changed in the meantime (someone else won a commit/push race).
    """
    cg, remote_heads = self.prepush(remote, force, revs)
    if cg is None:
        return remote_heads
    heads_arg = ['force'] if force else remote_heads
    return remote.unbundle(cg, heads_arg, 'push')
1427
1430
def changegroupinfo(self, nodes):
    """Report how many changesets are in *nodes*; list them in debug mode."""
    self.ui.note(_("%d changesets found\n") % len(nodes))
    if not self.ui.debugflag:
        return
    self.ui.debug(_("List of changesets:\n"))
    for n in nodes:
        self.ui.debug("%s\n" % hex(n))
1434
1437
1435 def changegroupsubset(self, bases, heads, source):
1438 def changegroupsubset(self, bases, heads, source):
1436 """This function generates a changegroup consisting of all the nodes
1439 """This function generates a changegroup consisting of all the nodes
1437 that are descendents of any of the bases, and ancestors of any of
1440 that are descendents of any of the bases, and ancestors of any of
1438 the heads.
1441 the heads.
1439
1442
1440 It is fairly complex as determining which filenodes and which
1443 It is fairly complex as determining which filenodes and which
1441 manifest nodes need to be included for the changeset to be complete
1444 manifest nodes need to be included for the changeset to be complete
1442 is non-trivial.
1445 is non-trivial.
1443
1446
1444 Another wrinkle is doing the reverse, figuring out which changeset in
1447 Another wrinkle is doing the reverse, figuring out which changeset in
1445 the changegroup a particular filenode or manifestnode belongs to."""
1448 the changegroup a particular filenode or manifestnode belongs to."""
1446
1449
1447 self.hook('preoutgoing', throw=True, source=source)
1450 self.hook('preoutgoing', throw=True, source=source)
1448
1451
1449 # Set up some initial variables
1452 # Set up some initial variables
1450 # Make it easy to refer to self.changelog
1453 # Make it easy to refer to self.changelog
1451 cl = self.changelog
1454 cl = self.changelog
1452 # msng is short for missing - compute the list of changesets in this
1455 # msng is short for missing - compute the list of changesets in this
1453 # changegroup.
1456 # changegroup.
1454 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1457 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1455 self.changegroupinfo(msng_cl_lst)
1458 self.changegroupinfo(msng_cl_lst)
1456 # Some bases may turn out to be superfluous, and some heads may be
1459 # Some bases may turn out to be superfluous, and some heads may be
1457 # too. nodesbetween will return the minimal set of bases and heads
1460 # too. nodesbetween will return the minimal set of bases and heads
1458 # necessary to re-create the changegroup.
1461 # necessary to re-create the changegroup.
1459
1462
1460 # Known heads are the list of heads that it is assumed the recipient
1463 # Known heads are the list of heads that it is assumed the recipient
1461 # of this changegroup will know about.
1464 # of this changegroup will know about.
1462 knownheads = {}
1465 knownheads = {}
1463 # We assume that all parents of bases are known heads.
1466 # We assume that all parents of bases are known heads.
1464 for n in bases:
1467 for n in bases:
1465 for p in cl.parents(n):
1468 for p in cl.parents(n):
1466 if p != nullid:
1469 if p != nullid:
1467 knownheads[p] = 1
1470 knownheads[p] = 1
1468 knownheads = knownheads.keys()
1471 knownheads = knownheads.keys()
1469 if knownheads:
1472 if knownheads:
1470 # Now that we know what heads are known, we can compute which
1473 # Now that we know what heads are known, we can compute which
1471 # changesets are known. The recipient must know about all
1474 # changesets are known. The recipient must know about all
1472 # changesets required to reach the known heads from the null
1475 # changesets required to reach the known heads from the null
1473 # changeset.
1476 # changeset.
1474 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1477 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1475 junk = None
1478 junk = None
1476 # Transform the list into an ersatz set.
1479 # Transform the list into an ersatz set.
1477 has_cl_set = dict.fromkeys(has_cl_set)
1480 has_cl_set = dict.fromkeys(has_cl_set)
1478 else:
1481 else:
1479 # If there were no known heads, the recipient cannot be assumed to
1482 # If there were no known heads, the recipient cannot be assumed to
1480 # know about any changesets.
1483 # know about any changesets.
1481 has_cl_set = {}
1484 has_cl_set = {}
1482
1485
1483 # Make it easy to refer to self.manifest
1486 # Make it easy to refer to self.manifest
1484 mnfst = self.manifest
1487 mnfst = self.manifest
1485 # We don't know which manifests are missing yet
1488 # We don't know which manifests are missing yet
1486 msng_mnfst_set = {}
1489 msng_mnfst_set = {}
1487 # Nor do we know which filenodes are missing.
1490 # Nor do we know which filenodes are missing.
1488 msng_filenode_set = {}
1491 msng_filenode_set = {}
1489
1492
1490 junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
1493 junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
1491 junk = None
1494 junk = None
1492
1495
1493 # A changeset always belongs to itself, so the changenode lookup
1496 # A changeset always belongs to itself, so the changenode lookup
1494 # function for a changenode is identity.
1497 # function for a changenode is identity.
1495 def identity(x):
1498 def identity(x):
1496 return x
1499 return x
1497
1500
1498 # A function generating function. Sets up an environment for the
1501 # A function generating function. Sets up an environment for the
1499 # inner function.
1502 # inner function.
1500 def cmp_by_rev_func(revlog):
1503 def cmp_by_rev_func(revlog):
1501 # Compare two nodes by their revision number in the environment's
1504 # Compare two nodes by their revision number in the environment's
1502 # revision history. Since the revision number both represents the
1505 # revision history. Since the revision number both represents the
1503 # most efficient order to read the nodes in, and represents a
1506 # most efficient order to read the nodes in, and represents a
1504 # topological sorting of the nodes, this function is often useful.
1507 # topological sorting of the nodes, this function is often useful.
1505 def cmp_by_rev(a, b):
1508 def cmp_by_rev(a, b):
1506 return cmp(revlog.rev(a), revlog.rev(b))
1509 return cmp(revlog.rev(a), revlog.rev(b))
1507 return cmp_by_rev
1510 return cmp_by_rev
1508
1511
1509 # If we determine that a particular file or manifest node must be a
1512 # If we determine that a particular file or manifest node must be a
1510 # node that the recipient of the changegroup will already have, we can
1513 # node that the recipient of the changegroup will already have, we can
1511 # also assume the recipient will have all the parents. This function
1514 # also assume the recipient will have all the parents. This function
1512 # prunes them from the set of missing nodes.
1515 # prunes them from the set of missing nodes.
1513 def prune_parents(revlog, hasset, msngset):
1516 def prune_parents(revlog, hasset, msngset):
1514 haslst = hasset.keys()
1517 haslst = hasset.keys()
1515 haslst.sort(cmp_by_rev_func(revlog))
1518 haslst.sort(cmp_by_rev_func(revlog))
1516 for node in haslst:
1519 for node in haslst:
1517 parentlst = [p for p in revlog.parents(node) if p != nullid]
1520 parentlst = [p for p in revlog.parents(node) if p != nullid]
1518 while parentlst:
1521 while parentlst:
1519 n = parentlst.pop()
1522 n = parentlst.pop()
1520 if n not in hasset:
1523 if n not in hasset:
1521 hasset[n] = 1
1524 hasset[n] = 1
1522 p = [p for p in revlog.parents(n) if p != nullid]
1525 p = [p for p in revlog.parents(n) if p != nullid]
1523 parentlst.extend(p)
1526 parentlst.extend(p)
1524 for n in hasset:
1527 for n in hasset:
1525 msngset.pop(n, None)
1528 msngset.pop(n, None)
1526
1529
1527 # This is a function generating function used to set up an environment
1530 # This is a function generating function used to set up an environment
1528 # for the inner function to execute in.
1531 # for the inner function to execute in.
1529 def manifest_and_file_collector(changedfileset):
1532 def manifest_and_file_collector(changedfileset):
1530 # This is an information gathering function that gathers
1533 # This is an information gathering function that gathers
1531 # information from each changeset node that goes out as part of
1534 # information from each changeset node that goes out as part of
1532 # the changegroup. The information gathered is a list of which
1535 # the changegroup. The information gathered is a list of which
1533 # manifest nodes are potentially required (the recipient may
1536 # manifest nodes are potentially required (the recipient may
1534 # already have them) and total list of all files which were
1537 # already have them) and total list of all files which were
1535 # changed in any changeset in the changegroup.
1538 # changed in any changeset in the changegroup.
1536 #
1539 #
1537 # We also remember the first changenode we saw any manifest
1540 # We also remember the first changenode we saw any manifest
1538 # referenced by so we can later determine which changenode 'owns'
1541 # referenced by so we can later determine which changenode 'owns'
1539 # the manifest.
1542 # the manifest.
1540 def collect_manifests_and_files(clnode):
1543 def collect_manifests_and_files(clnode):
1541 c = cl.read(clnode)
1544 c = cl.read(clnode)
1542 for f in c[3]:
1545 for f in c[3]:
1543 # This is to make sure we only have one instance of each
1546 # This is to make sure we only have one instance of each
1544 # filename string for each filename.
1547 # filename string for each filename.
1545 changedfileset.setdefault(f, f)
1548 changedfileset.setdefault(f, f)
1546 msng_mnfst_set.setdefault(c[0], clnode)
1549 msng_mnfst_set.setdefault(c[0], clnode)
1547 return collect_manifests_and_files
1550 return collect_manifests_and_files
1548
1551
1549 # Figure out which manifest nodes (of the ones we think might be part
1552 # Figure out which manifest nodes (of the ones we think might be part
1550 # of the changegroup) the recipient must know about and remove them
1553 # of the changegroup) the recipient must know about and remove them
1551 # from the changegroup.
1554 # from the changegroup.
1552 def prune_manifests():
1555 def prune_manifests():
1553 has_mnfst_set = {}
1556 has_mnfst_set = {}
1554 for n in msng_mnfst_set:
1557 for n in msng_mnfst_set:
1555 # If a 'missing' manifest thinks it belongs to a changenode
1558 # If a 'missing' manifest thinks it belongs to a changenode
1556 # the recipient is assumed to have, obviously the recipient
1559 # the recipient is assumed to have, obviously the recipient
1557 # must have that manifest.
1560 # must have that manifest.
1558 linknode = cl.node(mnfst.linkrev(n))
1561 linknode = cl.node(mnfst.linkrev(n))
1559 if linknode in has_cl_set:
1562 if linknode in has_cl_set:
1560 has_mnfst_set[n] = 1
1563 has_mnfst_set[n] = 1
1561 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1564 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1562
1565
1563 # Use the information collected in collect_manifests_and_files to say
1566 # Use the information collected in collect_manifests_and_files to say
1564 # which changenode any manifestnode belongs to.
1567 # which changenode any manifestnode belongs to.
1565 def lookup_manifest_link(mnfstnode):
1568 def lookup_manifest_link(mnfstnode):
1566 return msng_mnfst_set[mnfstnode]
1569 return msng_mnfst_set[mnfstnode]
1567
1570
1568 # A function generating function that sets up the initial environment
1571 # A function generating function that sets up the initial environment
1569 # the inner function.
1572 # the inner function.
1570 def filenode_collector(changedfiles):
1573 def filenode_collector(changedfiles):
1571 next_rev = [0]
1574 next_rev = [0]
1572 # This gathers information from each manifestnode included in the
1575 # This gathers information from each manifestnode included in the
1573 # changegroup about which filenodes the manifest node references
1576 # changegroup about which filenodes the manifest node references
1574 # so we can include those in the changegroup too.
1577 # so we can include those in the changegroup too.
1575 #
1578 #
1576 # It also remembers which changenode each filenode belongs to. It
1579 # It also remembers which changenode each filenode belongs to. It
1577 # does this by assuming the a filenode belongs to the changenode
1580 # does this by assuming the a filenode belongs to the changenode
1578 # the first manifest that references it belongs to.
1581 # the first manifest that references it belongs to.
1579 def collect_msng_filenodes(mnfstnode):
1582 def collect_msng_filenodes(mnfstnode):
1580 r = mnfst.rev(mnfstnode)
1583 r = mnfst.rev(mnfstnode)
1581 if r == next_rev[0]:
1584 if r == next_rev[0]:
1582 # If the last rev we looked at was the one just previous,
1585 # If the last rev we looked at was the one just previous,
1583 # we only need to see a diff.
1586 # we only need to see a diff.
1584 delta = mdiff.patchtext(mnfst.delta(mnfstnode))
1587 delta = mdiff.patchtext(mnfst.delta(mnfstnode))
1585 # For each line in the delta
1588 # For each line in the delta
1586 for dline in delta.splitlines():
1589 for dline in delta.splitlines():
1587 # get the filename and filenode for that line
1590 # get the filename and filenode for that line
1588 f, fnode = dline.split('\0')
1591 f, fnode = dline.split('\0')
1589 fnode = bin(fnode[:40])
1592 fnode = bin(fnode[:40])
1590 f = changedfiles.get(f, None)
1593 f = changedfiles.get(f, None)
1591 # And if the file is in the list of files we care
1594 # And if the file is in the list of files we care
1592 # about.
1595 # about.
1593 if f is not None:
1596 if f is not None:
1594 # Get the changenode this manifest belongs to
1597 # Get the changenode this manifest belongs to
1595 clnode = msng_mnfst_set[mnfstnode]
1598 clnode = msng_mnfst_set[mnfstnode]
1596 # Create the set of filenodes for the file if
1599 # Create the set of filenodes for the file if
1597 # there isn't one already.
1600 # there isn't one already.
1598 ndset = msng_filenode_set.setdefault(f, {})
1601 ndset = msng_filenode_set.setdefault(f, {})
1599 # And set the filenode's changelog node to the
1602 # And set the filenode's changelog node to the
1600 # manifest's if it hasn't been set already.
1603 # manifest's if it hasn't been set already.
1601 ndset.setdefault(fnode, clnode)
1604 ndset.setdefault(fnode, clnode)
1602 else:
1605 else:
1603 # Otherwise we need a full manifest.
1606 # Otherwise we need a full manifest.
1604 m = mnfst.read(mnfstnode)
1607 m = mnfst.read(mnfstnode)
1605 # For every file in we care about.
1608 # For every file in we care about.
1606 for f in changedfiles:
1609 for f in changedfiles:
1607 fnode = m.get(f, None)
1610 fnode = m.get(f, None)
1608 # If it's in the manifest
1611 # If it's in the manifest
1609 if fnode is not None:
1612 if fnode is not None:
1610 # See comments above.
1613 # See comments above.
1611 clnode = msng_mnfst_set[mnfstnode]
1614 clnode = msng_mnfst_set[mnfstnode]
1612 ndset = msng_filenode_set.setdefault(f, {})
1615 ndset = msng_filenode_set.setdefault(f, {})
1613 ndset.setdefault(fnode, clnode)
1616 ndset.setdefault(fnode, clnode)
1614 # Remember the revision we hope to see next.
1617 # Remember the revision we hope to see next.
1615 next_rev[0] = r + 1
1618 next_rev[0] = r + 1
1616 return collect_msng_filenodes
1619 return collect_msng_filenodes
1617
1620
1618 # We have a list of filenodes we think we need for a file, lets remove
1621 # We have a list of filenodes we think we need for a file, lets remove
1619 # all those we now the recipient must have.
1622 # all those we now the recipient must have.
1620 def prune_filenodes(f, filerevlog):
1623 def prune_filenodes(f, filerevlog):
1621 msngset = msng_filenode_set[f]
1624 msngset = msng_filenode_set[f]
1622 hasset = {}
1625 hasset = {}
1623 # If a 'missing' filenode thinks it belongs to a changenode we
1626 # If a 'missing' filenode thinks it belongs to a changenode we
1624 # assume the recipient must have, then the recipient must have
1627 # assume the recipient must have, then the recipient must have
1625 # that filenode.
1628 # that filenode.
1626 for n in msngset:
1629 for n in msngset:
1627 clnode = cl.node(filerevlog.linkrev(n))
1630 clnode = cl.node(filerevlog.linkrev(n))
1628 if clnode in has_cl_set:
1631 if clnode in has_cl_set:
1629 hasset[n] = 1
1632 hasset[n] = 1
1630 prune_parents(filerevlog, hasset, msngset)
1633 prune_parents(filerevlog, hasset, msngset)
1631
1634
1632 # A function generator function that sets up the a context for the
1635 # A function generator function that sets up the a context for the
1633 # inner function.
1636 # inner function.
1634 def lookup_filenode_link_func(fname):
1637 def lookup_filenode_link_func(fname):
1635 msngset = msng_filenode_set[fname]
1638 msngset = msng_filenode_set[fname]
1636 # Lookup the changenode the filenode belongs to.
1639 # Lookup the changenode the filenode belongs to.
1637 def lookup_filenode_link(fnode):
1640 def lookup_filenode_link(fnode):
1638 return msngset[fnode]
1641 return msngset[fnode]
1639 return lookup_filenode_link
1642 return lookup_filenode_link
1640
1643
1641 # Now that we have all theses utility functions to help out and
1644 # Now that we have all theses utility functions to help out and
1642 # logically divide up the task, generate the group.
1645 # logically divide up the task, generate the group.
1643 def gengroup():
1646 def gengroup():
1644 # The set of changed files starts empty.
1647 # The set of changed files starts empty.
1645 changedfiles = {}
1648 changedfiles = {}
1646 # Create a changenode group generator that will call our functions
1649 # Create a changenode group generator that will call our functions
1647 # back to lookup the owning changenode and collect information.
1650 # back to lookup the owning changenode and collect information.
1648 group = cl.group(msng_cl_lst, identity,
1651 group = cl.group(msng_cl_lst, identity,
1649 manifest_and_file_collector(changedfiles))
1652 manifest_and_file_collector(changedfiles))
1650 for chnk in group:
1653 for chnk in group:
1651 yield chnk
1654 yield chnk
1652
1655
1653 # The list of manifests has been collected by the generator
1656 # The list of manifests has been collected by the generator
1654 # calling our functions back.
1657 # calling our functions back.
1655 prune_manifests()
1658 prune_manifests()
1656 msng_mnfst_lst = msng_mnfst_set.keys()
1659 msng_mnfst_lst = msng_mnfst_set.keys()
1657 # Sort the manifestnodes by revision number.
1660 # Sort the manifestnodes by revision number.
1658 msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
1661 msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
1659 # Create a generator for the manifestnodes that calls our lookup
1662 # Create a generator for the manifestnodes that calls our lookup
1660 # and data collection functions back.
1663 # and data collection functions back.
1661 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1664 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1662 filenode_collector(changedfiles))
1665 filenode_collector(changedfiles))
1663 for chnk in group:
1666 for chnk in group:
1664 yield chnk
1667 yield chnk
1665
1668
1666 # These are no longer needed, dereference and toss the memory for
1669 # These are no longer needed, dereference and toss the memory for
1667 # them.
1670 # them.
1668 msng_mnfst_lst = None
1671 msng_mnfst_lst = None
1669 msng_mnfst_set.clear()
1672 msng_mnfst_set.clear()
1670
1673
1671 changedfiles = changedfiles.keys()
1674 changedfiles = changedfiles.keys()
1672 changedfiles.sort()
1675 changedfiles.sort()
1673 # Go through all our files in order sorted by name.
1676 # Go through all our files in order sorted by name.
1674 for fname in changedfiles:
1677 for fname in changedfiles:
1675 filerevlog = self.file(fname)
1678 filerevlog = self.file(fname)
1676 # Toss out the filenodes that the recipient isn't really
1679 # Toss out the filenodes that the recipient isn't really
1677 # missing.
1680 # missing.
1678 if msng_filenode_set.has_key(fname):
1681 if msng_filenode_set.has_key(fname):
1679 prune_filenodes(fname, filerevlog)
1682 prune_filenodes(fname, filerevlog)
1680 msng_filenode_lst = msng_filenode_set[fname].keys()
1683 msng_filenode_lst = msng_filenode_set[fname].keys()
1681 else:
1684 else:
1682 msng_filenode_lst = []
1685 msng_filenode_lst = []
1683 # If any filenodes are left, generate the group for them,
1686 # If any filenodes are left, generate the group for them,
1684 # otherwise don't bother.
1687 # otherwise don't bother.
1685 if len(msng_filenode_lst) > 0:
1688 if len(msng_filenode_lst) > 0:
1686 yield changegroup.genchunk(fname)
1689 yield changegroup.genchunk(fname)
1687 # Sort the filenodes by their revision #
1690 # Sort the filenodes by their revision #
1688 msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
1691 msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
1689 # Create a group generator and only pass in a changenode
1692 # Create a group generator and only pass in a changenode
1690 # lookup function as we need to collect no information
1693 # lookup function as we need to collect no information
1691 # from filenodes.
1694 # from filenodes.
1692 group = filerevlog.group(msng_filenode_lst,
1695 group = filerevlog.group(msng_filenode_lst,
1693 lookup_filenode_link_func(fname))
1696 lookup_filenode_link_func(fname))
1694 for chnk in group:
1697 for chnk in group:
1695 yield chnk
1698 yield chnk
1696 if msng_filenode_set.has_key(fname):
1699 if msng_filenode_set.has_key(fname):
1697 # Don't need this anymore, toss it to free memory.
1700 # Don't need this anymore, toss it to free memory.
1698 del msng_filenode_set[fname]
1701 del msng_filenode_set[fname]
1699 # Signal that no more groups are left.
1702 # Signal that no more groups are left.
1700 yield changegroup.closechunk()
1703 yield changegroup.closechunk()
1701
1704
1702 if msng_cl_lst:
1705 if msng_cl_lst:
1703 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1706 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1704
1707
1705 return util.chunkbuffer(gengroup())
1708 return util.chunkbuffer(gengroup())
1706
1709
1707 def changegroup(self, basenodes, source):
1710 def changegroup(self, basenodes, source):
1708 """Generate a changegroup of all nodes that we have that a recipient
1711 """Generate a changegroup of all nodes that we have that a recipient
1709 doesn't.
1712 doesn't.
1710
1713
1711 This is much easier than the previous function as we can assume that
1714 This is much easier than the previous function as we can assume that
1712 the recipient has any changenode we aren't sending them."""
1715 the recipient has any changenode we aren't sending them."""
1713
1716
1714 self.hook('preoutgoing', throw=True, source=source)
1717 self.hook('preoutgoing', throw=True, source=source)
1715
1718
1716 cl = self.changelog
1719 cl = self.changelog
1717 nodes = cl.nodesbetween(basenodes, None)[0]
1720 nodes = cl.nodesbetween(basenodes, None)[0]
1718 revset = dict.fromkeys([cl.rev(n) for n in nodes])
1721 revset = dict.fromkeys([cl.rev(n) for n in nodes])
1719 self.changegroupinfo(nodes)
1722 self.changegroupinfo(nodes)
1720
1723
1721 def identity(x):
1724 def identity(x):
1722 return x
1725 return x
1723
1726
1724 def gennodelst(revlog):
1727 def gennodelst(revlog):
1725 for r in xrange(0, revlog.count()):
1728 for r in xrange(0, revlog.count()):
1726 n = revlog.node(r)
1729 n = revlog.node(r)
1727 if revlog.linkrev(n) in revset:
1730 if revlog.linkrev(n) in revset:
1728 yield n
1731 yield n
1729
1732
1730 def changed_file_collector(changedfileset):
1733 def changed_file_collector(changedfileset):
1731 def collect_changed_files(clnode):
1734 def collect_changed_files(clnode):
1732 c = cl.read(clnode)
1735 c = cl.read(clnode)
1733 for fname in c[3]:
1736 for fname in c[3]:
1734 changedfileset[fname] = 1
1737 changedfileset[fname] = 1
1735 return collect_changed_files
1738 return collect_changed_files
1736
1739
1737 def lookuprevlink_func(revlog):
1740 def lookuprevlink_func(revlog):
1738 def lookuprevlink(n):
1741 def lookuprevlink(n):
1739 return cl.node(revlog.linkrev(n))
1742 return cl.node(revlog.linkrev(n))
1740 return lookuprevlink
1743 return lookuprevlink
1741
1744
1742 def gengroup():
1745 def gengroup():
1743 # construct a list of all changed files
1746 # construct a list of all changed files
1744 changedfiles = {}
1747 changedfiles = {}
1745
1748
1746 for chnk in cl.group(nodes, identity,
1749 for chnk in cl.group(nodes, identity,
1747 changed_file_collector(changedfiles)):
1750 changed_file_collector(changedfiles)):
1748 yield chnk
1751 yield chnk
1749 changedfiles = changedfiles.keys()
1752 changedfiles = changedfiles.keys()
1750 changedfiles.sort()
1753 changedfiles.sort()
1751
1754
1752 mnfst = self.manifest
1755 mnfst = self.manifest
1753 nodeiter = gennodelst(mnfst)
1756 nodeiter = gennodelst(mnfst)
1754 for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
1757 for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
1755 yield chnk
1758 yield chnk
1756
1759
1757 for fname in changedfiles:
1760 for fname in changedfiles:
1758 filerevlog = self.file(fname)
1761 filerevlog = self.file(fname)
1759 nodeiter = gennodelst(filerevlog)
1762 nodeiter = gennodelst(filerevlog)
1760 nodeiter = list(nodeiter)
1763 nodeiter = list(nodeiter)
1761 if nodeiter:
1764 if nodeiter:
1762 yield changegroup.genchunk(fname)
1765 yield changegroup.genchunk(fname)
1763 lookup = lookuprevlink_func(filerevlog)
1766 lookup = lookuprevlink_func(filerevlog)
1764 for chnk in filerevlog.group(nodeiter, lookup):
1767 for chnk in filerevlog.group(nodeiter, lookup):
1765 yield chnk
1768 yield chnk
1766
1769
1767 yield changegroup.closechunk()
1770 yield changegroup.closechunk()
1768
1771
1769 if nodes:
1772 if nodes:
1770 self.hook('outgoing', node=hex(nodes[0]), source=source)
1773 self.hook('outgoing', node=hex(nodes[0]), source=source)
1771
1774
1772 return util.chunkbuffer(gengroup())
1775 return util.chunkbuffer(gengroup())
1773
1776
1774 def addchangegroup(self, source, srctype, url):
1777 def addchangegroup(self, source, srctype, url):
1775 """add changegroup to repo.
1778 """add changegroup to repo.
1776
1779
1777 return values:
1780 return values:
1778 - nothing changed or no source: 0
1781 - nothing changed or no source: 0
1779 - more heads than before: 1+added heads (2..n)
1782 - more heads than before: 1+added heads (2..n)
1780 - less heads than before: -1-removed heads (-2..-n)
1783 - less heads than before: -1-removed heads (-2..-n)
1781 - number of heads stays the same: 1
1784 - number of heads stays the same: 1
1782 """
1785 """
1783 def csmap(x):
1786 def csmap(x):
1784 self.ui.debug(_("add changeset %s\n") % short(x))
1787 self.ui.debug(_("add changeset %s\n") % short(x))
1785 return cl.count()
1788 return cl.count()
1786
1789
1787 def revmap(x):
1790 def revmap(x):
1788 return cl.rev(x)
1791 return cl.rev(x)
1789
1792
1790 if not source:
1793 if not source:
1791 return 0
1794 return 0
1792
1795
1793 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1796 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1794
1797
1795 changesets = files = revisions = 0
1798 changesets = files = revisions = 0
1796
1799
1797 tr = self.transaction()
1800 tr = self.transaction()
1798
1801
1799 # write changelog data to temp files so concurrent readers will not see
1802 # write changelog data to temp files so concurrent readers will not see
1800 # inconsistent view
1803 # inconsistent view
1801 cl = self.changelog
1804 cl = self.changelog
1802 cl.delayupdate()
1805 cl.delayupdate()
1803 oldheads = len(cl.heads())
1806 oldheads = len(cl.heads())
1804
1807
1805 # pull off the changeset group
1808 # pull off the changeset group
1806 self.ui.status(_("adding changesets\n"))
1809 self.ui.status(_("adding changesets\n"))
1807 cor = cl.count() - 1
1810 cor = cl.count() - 1
1808 chunkiter = changegroup.chunkiter(source)
1811 chunkiter = changegroup.chunkiter(source)
1809 if cl.addgroup(chunkiter, csmap, tr, 1) is None:
1812 if cl.addgroup(chunkiter, csmap, tr, 1) is None:
1810 raise util.Abort(_("received changelog group is empty"))
1813 raise util.Abort(_("received changelog group is empty"))
1811 cnr = cl.count() - 1
1814 cnr = cl.count() - 1
1812 changesets = cnr - cor
1815 changesets = cnr - cor
1813
1816
1814 # pull off the manifest group
1817 # pull off the manifest group
1815 self.ui.status(_("adding manifests\n"))
1818 self.ui.status(_("adding manifests\n"))
1816 chunkiter = changegroup.chunkiter(source)
1819 chunkiter = changegroup.chunkiter(source)
1817 # no need to check for empty manifest group here:
1820 # no need to check for empty manifest group here:
1818 # if the result of the merge of 1 and 2 is the same in 3 and 4,
1821 # if the result of the merge of 1 and 2 is the same in 3 and 4,
1819 # no new manifest will be created and the manifest group will
1822 # no new manifest will be created and the manifest group will
1820 # be empty during the pull
1823 # be empty during the pull
1821 self.manifest.addgroup(chunkiter, revmap, tr)
1824 self.manifest.addgroup(chunkiter, revmap, tr)
1822
1825
1823 # process the files
1826 # process the files
1824 self.ui.status(_("adding file changes\n"))
1827 self.ui.status(_("adding file changes\n"))
1825 while 1:
1828 while 1:
1826 f = changegroup.getchunk(source)
1829 f = changegroup.getchunk(source)
1827 if not f:
1830 if not f:
1828 break
1831 break
1829 self.ui.debug(_("adding %s revisions\n") % f)
1832 self.ui.debug(_("adding %s revisions\n") % f)
1830 fl = self.file(f)
1833 fl = self.file(f)
1831 o = fl.count()
1834 o = fl.count()
1832 chunkiter = changegroup.chunkiter(source)
1835 chunkiter = changegroup.chunkiter(source)
1833 if fl.addgroup(chunkiter, revmap, tr) is None:
1836 if fl.addgroup(chunkiter, revmap, tr) is None:
1834 raise util.Abort(_("received file revlog group is empty"))
1837 raise util.Abort(_("received file revlog group is empty"))
1835 revisions += fl.count() - o
1838 revisions += fl.count() - o
1836 files += 1
1839 files += 1
1837
1840
1838 # make changelog see real files again
1841 # make changelog see real files again
1839 cl.finalize(tr)
1842 cl.finalize(tr)
1840
1843
1841 newheads = len(self.changelog.heads())
1844 newheads = len(self.changelog.heads())
1842 heads = ""
1845 heads = ""
1843 if oldheads and newheads != oldheads:
1846 if oldheads and newheads != oldheads:
1844 heads = _(" (%+d heads)") % (newheads - oldheads)
1847 heads = _(" (%+d heads)") % (newheads - oldheads)
1845
1848
1846 self.ui.status(_("added %d changesets"
1849 self.ui.status(_("added %d changesets"
1847 " with %d changes to %d files%s\n")
1850 " with %d changes to %d files%s\n")
1848 % (changesets, revisions, files, heads))
1851 % (changesets, revisions, files, heads))
1849
1852
1850 if changesets > 0:
1853 if changesets > 0:
1851 self.hook('pretxnchangegroup', throw=True,
1854 self.hook('pretxnchangegroup', throw=True,
1852 node=hex(self.changelog.node(cor+1)), source=srctype,
1855 node=hex(self.changelog.node(cor+1)), source=srctype,
1853 url=url)
1856 url=url)
1854
1857
1855 tr.close()
1858 tr.close()
1856
1859
1857 if changesets > 0:
1860 if changesets > 0:
1858 self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
1861 self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
1859 source=srctype, url=url)
1862 source=srctype, url=url)
1860
1863
1861 for i in xrange(cor + 1, cnr + 1):
1864 for i in xrange(cor + 1, cnr + 1):
1862 self.hook("incoming", node=hex(self.changelog.node(i)),
1865 self.hook("incoming", node=hex(self.changelog.node(i)),
1863 source=srctype, url=url)
1866 source=srctype, url=url)
1864
1867
1865 # never return 0 here:
1868 # never return 0 here:
1866 if newheads < oldheads:
1869 if newheads < oldheads:
1867 return newheads - oldheads - 1
1870 return newheads - oldheads - 1
1868 else:
1871 else:
1869 return newheads - oldheads + 1
1872 return newheads - oldheads + 1
1870
1873
1871
1874
1872 def stream_in(self, remote):
1875 def stream_in(self, remote):
1873 fp = remote.stream_out()
1876 fp = remote.stream_out()
1874 l = fp.readline()
1877 l = fp.readline()
1875 try:
1878 try:
1876 resp = int(l)
1879 resp = int(l)
1877 except ValueError:
1880 except ValueError:
1878 raise util.UnexpectedOutput(
1881 raise util.UnexpectedOutput(
1879 _('Unexpected response from remote server:'), l)
1882 _('Unexpected response from remote server:'), l)
1880 if resp == 1:
1883 if resp == 1:
1881 raise util.Abort(_('operation forbidden by server'))
1884 raise util.Abort(_('operation forbidden by server'))
1882 elif resp == 2:
1885 elif resp == 2:
1883 raise util.Abort(_('locking the remote repository failed'))
1886 raise util.Abort(_('locking the remote repository failed'))
1884 elif resp != 0:
1887 elif resp != 0:
1885 raise util.Abort(_('the server sent an unknown error code'))
1888 raise util.Abort(_('the server sent an unknown error code'))
1886 self.ui.status(_('streaming all changes\n'))
1889 self.ui.status(_('streaming all changes\n'))
1887 l = fp.readline()
1890 l = fp.readline()
1888 try:
1891 try:
1889 total_files, total_bytes = map(int, l.split(' ', 1))
1892 total_files, total_bytes = map(int, l.split(' ', 1))
1890 except ValueError, TypeError:
1893 except ValueError, TypeError:
1891 raise util.UnexpectedOutput(
1894 raise util.UnexpectedOutput(
1892 _('Unexpected response from remote server:'), l)
1895 _('Unexpected response from remote server:'), l)
1893 self.ui.status(_('%d files to transfer, %s of data\n') %
1896 self.ui.status(_('%d files to transfer, %s of data\n') %
1894 (total_files, util.bytecount(total_bytes)))
1897 (total_files, util.bytecount(total_bytes)))
1895 start = time.time()
1898 start = time.time()
1896 for i in xrange(total_files):
1899 for i in xrange(total_files):
1897 # XXX doesn't support '\n' or '\r' in filenames
1900 # XXX doesn't support '\n' or '\r' in filenames
1898 l = fp.readline()
1901 l = fp.readline()
1899 try:
1902 try:
1900 name, size = l.split('\0', 1)
1903 name, size = l.split('\0', 1)
1901 size = int(size)
1904 size = int(size)
1902 except ValueError, TypeError:
1905 except ValueError, TypeError:
1903 raise util.UnexpectedOutput(
1906 raise util.UnexpectedOutput(
1904 _('Unexpected response from remote server:'), l)
1907 _('Unexpected response from remote server:'), l)
1905 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
1908 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
1906 ofp = self.sopener(name, 'w')
1909 ofp = self.sopener(name, 'w')
1907 for chunk in util.filechunkiter(fp, limit=size):
1910 for chunk in util.filechunkiter(fp, limit=size):
1908 ofp.write(chunk)
1911 ofp.write(chunk)
1909 ofp.close()
1912 ofp.close()
1910 elapsed = time.time() - start
1913 elapsed = time.time() - start
1911 if elapsed <= 0:
1914 if elapsed <= 0:
1912 elapsed = 0.001
1915 elapsed = 0.001
1913 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1916 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1914 (util.bytecount(total_bytes), elapsed,
1917 (util.bytecount(total_bytes), elapsed,
1915 util.bytecount(total_bytes / elapsed)))
1918 util.bytecount(total_bytes / elapsed)))
1916 self.reload()
1919 self.reload()
1917 return len(self.heads()) + 1
1920 return len(self.heads()) + 1
1918
1921
1919 def clone(self, remote, heads=[], stream=False):
1922 def clone(self, remote, heads=[], stream=False):
1920 '''clone remote repository.
1923 '''clone remote repository.
1921
1924
1922 keyword arguments:
1925 keyword arguments:
1923 heads: list of revs to clone (forces use of pull)
1926 heads: list of revs to clone (forces use of pull)
1924 stream: use streaming clone if possible'''
1927 stream: use streaming clone if possible'''
1925
1928
1926 # now, all clients that can request uncompressed clones can
1929 # now, all clients that can request uncompressed clones can
1927 # read repo formats supported by all servers that can serve
1930 # read repo formats supported by all servers that can serve
1928 # them.
1931 # them.
1929
1932
1930 # if revlog format changes, client will have to check version
1933 # if revlog format changes, client will have to check version
1931 # and format flags on "stream" capability, and use
1934 # and format flags on "stream" capability, and use
1932 # uncompressed only if compatible.
1935 # uncompressed only if compatible.
1933
1936
1934 if stream and not heads and remote.capable('stream'):
1937 if stream and not heads and remote.capable('stream'):
1935 return self.stream_in(remote)
1938 return self.stream_in(remote)
1936 return self.pull(remote, heads)
1939 return self.pull(remote, heads)
1937
1940
1938 # used to avoid circular references so destructors work
1941 # used to avoid circular references so destructors work
1939 def aftertrans(files):
1942 def aftertrans(files):
1940 renamefiles = [tuple(t) for t in files]
1943 renamefiles = [tuple(t) for t in files]
1941 def a():
1944 def a():
1942 for src, dest in renamefiles:
1945 for src, dest in renamefiles:
1943 util.rename(src, dest)
1946 util.rename(src, dest)
1944 return a
1947 return a
1945
1948
1946 def instance(ui, path, create):
1949 def instance(ui, path, create):
1947 return localrepository(ui, util.drop_scheme('file', path), create)
1950 return localrepository(ui, util.drop_scheme('file', path), create)
1948
1951
1949 def islocal(path):
1952 def islocal(path):
1950 return True
1953 return True
@@ -1,88 +1,96 b''
1 #!/bin/sh
1 #!/bin/sh
2
2
3 hg init a
3 hg init a
4
4
5 cd a
5 cd a
6 echo a > a
6 echo a > a
7 hg ci -Ama -d '1 0'
7 hg ci -Ama -d '1 0'
8
8
9 hg cp a b
9 hg cp a b
10 hg ci -mb -d '2 0'
10 hg ci -mb -d '2 0'
11
11
12 mkdir dir
12 mkdir dir
13 hg mv b dir
13 hg mv b dir
14 hg ci -mc -d '3 0'
14 hg ci -mc -d '3 0'
15
15
16 hg mv a b
16 hg mv a b
17 echo a > d
17 echo a > d
18 hg add d
18 hg add d
19 hg ci -md -d '4 0'
19 hg ci -md -d '4 0'
20
20
21 hg mv dir/b e
21 hg mv dir/b e
22 hg ci -me -d '5 0'
22 hg ci -me -d '5 0'
23
23
24 hg log a
24 hg log a
25 echo % -f, directory
25 echo % -f, directory
26 hg log -f dir
26 hg log -f dir
27 echo % -f, but no args
27 echo % -f, but no args
28 hg log -f
28 hg log -f
29 echo % one rename
29 echo % one rename
30 hg log -vf a
30 hg log -vf a
31 echo % many renames
31 echo % many renames
32 hg log -vf e
32 hg log -vf e
33
33
34 echo % log copies
34 echo % log copies
35 hg log -vC --template '{rev} {file_copies%filecopy}\n'
35 hg log -vC --template '{rev} {file_copies%filecopy}\n'
36
36
37 echo % log copies, non-linear manifest
37 echo % log copies, non-linear manifest
38 hg up -C 3
38 hg up -C 3
39 hg mv dir/b e
39 hg mv dir/b e
40 echo foo > foo
40 echo foo > foo
41 hg ci -Ame2 -d '6 0'
41 hg ci -Ame2 -d '6 0'
42 hg log -vC --template '{rev} {file_copies%filecopy}\n' -r 5
42 hg log -vC --template '{rev} {file_copies%filecopy}\n' -r 5
43
43
44 echo '% log -p d'
44 echo '% log -p d'
45 hg log -pv d
45 hg log -pv d
46
46
47 # log --follow tests
47 # log --follow tests
48 hg init ../follow
48 hg init ../follow
49 cd ../follow
49 cd ../follow
50
50 echo base > base
51 echo base > base
51 hg ci -Ambase -d '1 0'
52 hg ci -Ambase -d '1 0'
52
53
53 echo r1 >> base
54 echo r1 >> base
54 hg ci -Amr1 -d '1 0'
55 hg ci -Amr1 -d '1 0'
55 echo r2 >> base
56 echo r2 >> base
56 hg ci -Amr2 -d '1 0'
57 hg ci -Amr2 -d '1 0'
57
58
58 hg up -C 1
59 hg up -C 1
59 echo b1 > b1
60 echo b1 > b1
60 hg ci -Amb1 -d '1 0'
61 hg ci -Amb1 -d '1 0'
61
62
62 echo % log -f
63 echo % log -f
63 hg log -f
64 hg log -f
64
65
65 hg up -C 0
66 hg up -C 0
66 echo b2 > b2
67 echo b2 > b2
67 hg ci -Amb2 -d '1 0'
68 hg ci -Amb2 -d '1 0'
68
69
69 echo % log -f -r 1:tip
70 echo % log -f -r 1:tip
70 hg log -f -r 1:tip
71 hg log -f -r 1:tip
71
72
72 hg up -C 3
73 hg up -C 3
73 hg merge tip
74 hg merge tip
75
76 echo % log -r . with two parents
77 hg log -r .
78
74 hg ci -mm12 -d '1 0'
79 hg ci -mm12 -d '1 0'
75
80
81 echo % log -r . with one parent
82 hg log -r .
83
76 echo postm >> b1
84 echo postm >> b1
77 hg ci -Amb1.1 -d'1 0'
85 hg ci -Amb1.1 -d'1 0'
78
86
79 echo % log --follow-first
87 echo % log --follow-first
80 hg log --follow-first
88 hg log --follow-first
81
89
82 echo % log -P 2
90 echo % log -P 2
83 hg log -P 2
91 hg log -P 2
84
92
85 echo '% log -r ""'
93 echo '% log -r ""'
86 hg log -r ''
94 hg log -r ''
87
95
88 exit 0
96 exit 0
@@ -1,204 +1,221 b''
1 adding a
1 adding a
2 changeset: 0:8580ff50825a
2 changeset: 0:8580ff50825a
3 user: test
3 user: test
4 date: Thu Jan 01 00:00:01 1970 +0000
4 date: Thu Jan 01 00:00:01 1970 +0000
5 summary: a
5 summary: a
6
6
7 % -f, directory
7 % -f, directory
8 abort: can only follow copies/renames for explicit file names
8 abort: can only follow copies/renames for explicit file names
9 % -f, but no args
9 % -f, but no args
10 changeset: 4:b30c444c7c84
10 changeset: 4:b30c444c7c84
11 tag: tip
11 tag: tip
12 user: test
12 user: test
13 date: Thu Jan 01 00:00:05 1970 +0000
13 date: Thu Jan 01 00:00:05 1970 +0000
14 summary: e
14 summary: e
15
15
16 changeset: 3:16b60bf3f99a
16 changeset: 3:16b60bf3f99a
17 user: test
17 user: test
18 date: Thu Jan 01 00:00:04 1970 +0000
18 date: Thu Jan 01 00:00:04 1970 +0000
19 summary: d
19 summary: d
20
20
21 changeset: 2:21fba396af4c
21 changeset: 2:21fba396af4c
22 user: test
22 user: test
23 date: Thu Jan 01 00:00:03 1970 +0000
23 date: Thu Jan 01 00:00:03 1970 +0000
24 summary: c
24 summary: c
25
25
26 changeset: 1:c0296dabce9b
26 changeset: 1:c0296dabce9b
27 user: test
27 user: test
28 date: Thu Jan 01 00:00:02 1970 +0000
28 date: Thu Jan 01 00:00:02 1970 +0000
29 summary: b
29 summary: b
30
30
31 changeset: 0:8580ff50825a
31 changeset: 0:8580ff50825a
32 user: test
32 user: test
33 date: Thu Jan 01 00:00:01 1970 +0000
33 date: Thu Jan 01 00:00:01 1970 +0000
34 summary: a
34 summary: a
35
35
36 % one rename
36 % one rename
37 changeset: 0:8580ff50825a
37 changeset: 0:8580ff50825a
38 user: test
38 user: test
39 date: Thu Jan 01 00:00:01 1970 +0000
39 date: Thu Jan 01 00:00:01 1970 +0000
40 files: a
40 files: a
41 description:
41 description:
42 a
42 a
43
43
44
44
45 % many renames
45 % many renames
46 changeset: 4:b30c444c7c84
46 changeset: 4:b30c444c7c84
47 tag: tip
47 tag: tip
48 user: test
48 user: test
49 date: Thu Jan 01 00:00:05 1970 +0000
49 date: Thu Jan 01 00:00:05 1970 +0000
50 files: dir/b e
50 files: dir/b e
51 description:
51 description:
52 e
52 e
53
53
54
54
55 changeset: 2:21fba396af4c
55 changeset: 2:21fba396af4c
56 user: test
56 user: test
57 date: Thu Jan 01 00:00:03 1970 +0000
57 date: Thu Jan 01 00:00:03 1970 +0000
58 files: b dir/b
58 files: b dir/b
59 description:
59 description:
60 c
60 c
61
61
62
62
63 changeset: 1:c0296dabce9b
63 changeset: 1:c0296dabce9b
64 user: test
64 user: test
65 date: Thu Jan 01 00:00:02 1970 +0000
65 date: Thu Jan 01 00:00:02 1970 +0000
66 files: b
66 files: b
67 description:
67 description:
68 b
68 b
69
69
70
70
71 changeset: 0:8580ff50825a
71 changeset: 0:8580ff50825a
72 user: test
72 user: test
73 date: Thu Jan 01 00:00:01 1970 +0000
73 date: Thu Jan 01 00:00:01 1970 +0000
74 files: a
74 files: a
75 description:
75 description:
76 a
76 a
77
77
78
78
79 % log copies
79 % log copies
80 4 e (dir/b)
80 4 e (dir/b)
81 3 b (a)
81 3 b (a)
82 2 dir/b (b)
82 2 dir/b (b)
83 1 b (a)
83 1 b (a)
84 0
84 0
85 % log copies, non-linear manifest
85 % log copies, non-linear manifest
86 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
86 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
87 adding foo
87 adding foo
88 5 e (dir/b)
88 5 e (dir/b)
89 % log -p d
89 % log -p d
90 changeset: 3:16b60bf3f99a
90 changeset: 3:16b60bf3f99a
91 user: test
91 user: test
92 date: Thu Jan 01 00:00:04 1970 +0000
92 date: Thu Jan 01 00:00:04 1970 +0000
93 files: a b d
93 files: a b d
94 description:
94 description:
95 d
95 d
96
96
97
97
98 diff -r 21fba396af4c -r 16b60bf3f99a d
98 diff -r 21fba396af4c -r 16b60bf3f99a d
99 --- /dev/null Thu Jan 01 00:00:00 1970 +0000
99 --- /dev/null Thu Jan 01 00:00:00 1970 +0000
100 +++ b/d Thu Jan 01 00:00:04 1970 +0000
100 +++ b/d Thu Jan 01 00:00:04 1970 +0000
101 @@ -0,0 +1,1 @@
101 @@ -0,0 +1,1 @@
102 +a
102 +a
103
103
104 adding base
104 adding base
105 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
105 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
106 adding b1
106 adding b1
107 % log -f
107 % log -f
108 changeset: 3:e62f78d544b4
108 changeset: 3:e62f78d544b4
109 tag: tip
109 tag: tip
110 parent: 1:3d5bf5654eda
110 parent: 1:3d5bf5654eda
111 user: test
111 user: test
112 date: Thu Jan 01 00:00:01 1970 +0000
112 date: Thu Jan 01 00:00:01 1970 +0000
113 summary: b1
113 summary: b1
114
114
115 changeset: 1:3d5bf5654eda
115 changeset: 1:3d5bf5654eda
116 user: test
116 user: test
117 date: Thu Jan 01 00:00:01 1970 +0000
117 date: Thu Jan 01 00:00:01 1970 +0000
118 summary: r1
118 summary: r1
119
119
120 changeset: 0:67e992f2c4f3
120 changeset: 0:67e992f2c4f3
121 user: test
121 user: test
122 date: Thu Jan 01 00:00:01 1970 +0000
122 date: Thu Jan 01 00:00:01 1970 +0000
123 summary: base
123 summary: base
124
124
125 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
125 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
126 adding b2
126 adding b2
127 % log -f -r 1:tip
127 % log -f -r 1:tip
128 changeset: 1:3d5bf5654eda
128 changeset: 1:3d5bf5654eda
129 user: test
129 user: test
130 date: Thu Jan 01 00:00:01 1970 +0000
130 date: Thu Jan 01 00:00:01 1970 +0000
131 summary: r1
131 summary: r1
132
132
133 changeset: 2:60c670bf5b30
133 changeset: 2:60c670bf5b30
134 user: test
134 user: test
135 date: Thu Jan 01 00:00:01 1970 +0000
135 date: Thu Jan 01 00:00:01 1970 +0000
136 summary: r2
136 summary: r2
137
137
138 changeset: 3:e62f78d544b4
138 changeset: 3:e62f78d544b4
139 parent: 1:3d5bf5654eda
139 parent: 1:3d5bf5654eda
140 user: test
140 user: test
141 date: Thu Jan 01 00:00:01 1970 +0000
141 date: Thu Jan 01 00:00:01 1970 +0000
142 summary: b1
142 summary: b1
143
143
144 2 files updated, 0 files merged, 1 files removed, 0 files unresolved
144 2 files updated, 0 files merged, 1 files removed, 0 files unresolved
145 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
145 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
146 (branch merge, don't forget to commit)
146 (branch merge, don't forget to commit)
147 % log -r . with two parents
148 warning: working directory has two parents, tag '.' uses the first
149 changeset: 3:e62f78d544b4
150 parent: 1:3d5bf5654eda
151 user: test
152 date: Thu Jan 01 00:00:01 1970 +0000
153 summary: b1
154
155 % log -r . with one parent
156 changeset: 5:302e9dd6890d
157 tag: tip
158 parent: 3:e62f78d544b4
159 parent: 4:ddb82e70d1a1
160 user: test
161 date: Thu Jan 01 00:00:01 1970 +0000
162 summary: m12
163
147 % log --follow-first
164 % log --follow-first
148 changeset: 6:2404bbcab562
165 changeset: 6:2404bbcab562
149 tag: tip
166 tag: tip
150 user: test
167 user: test
151 date: Thu Jan 01 00:00:01 1970 +0000
168 date: Thu Jan 01 00:00:01 1970 +0000
152 summary: b1.1
169 summary: b1.1
153
170
154 changeset: 5:302e9dd6890d
171 changeset: 5:302e9dd6890d
155 parent: 3:e62f78d544b4
172 parent: 3:e62f78d544b4
156 parent: 4:ddb82e70d1a1
173 parent: 4:ddb82e70d1a1
157 user: test
174 user: test
158 date: Thu Jan 01 00:00:01 1970 +0000
175 date: Thu Jan 01 00:00:01 1970 +0000
159 summary: m12
176 summary: m12
160
177
161 changeset: 3:e62f78d544b4
178 changeset: 3:e62f78d544b4
162 parent: 1:3d5bf5654eda
179 parent: 1:3d5bf5654eda
163 user: test
180 user: test
164 date: Thu Jan 01 00:00:01 1970 +0000
181 date: Thu Jan 01 00:00:01 1970 +0000
165 summary: b1
182 summary: b1
166
183
167 changeset: 1:3d5bf5654eda
184 changeset: 1:3d5bf5654eda
168 user: test
185 user: test
169 date: Thu Jan 01 00:00:01 1970 +0000
186 date: Thu Jan 01 00:00:01 1970 +0000
170 summary: r1
187 summary: r1
171
188
172 changeset: 0:67e992f2c4f3
189 changeset: 0:67e992f2c4f3
173 user: test
190 user: test
174 date: Thu Jan 01 00:00:01 1970 +0000
191 date: Thu Jan 01 00:00:01 1970 +0000
175 summary: base
192 summary: base
176
193
177 % log -P 2
194 % log -P 2
178 changeset: 6:2404bbcab562
195 changeset: 6:2404bbcab562
179 tag: tip
196 tag: tip
180 user: test
197 user: test
181 date: Thu Jan 01 00:00:01 1970 +0000
198 date: Thu Jan 01 00:00:01 1970 +0000
182 summary: b1.1
199 summary: b1.1
183
200
184 changeset: 5:302e9dd6890d
201 changeset: 5:302e9dd6890d
185 parent: 3:e62f78d544b4
202 parent: 3:e62f78d544b4
186 parent: 4:ddb82e70d1a1
203 parent: 4:ddb82e70d1a1
187 user: test
204 user: test
188 date: Thu Jan 01 00:00:01 1970 +0000
205 date: Thu Jan 01 00:00:01 1970 +0000
189 summary: m12
206 summary: m12
190
207
191 changeset: 4:ddb82e70d1a1
208 changeset: 4:ddb82e70d1a1
192 parent: 0:67e992f2c4f3
209 parent: 0:67e992f2c4f3
193 user: test
210 user: test
194 date: Thu Jan 01 00:00:01 1970 +0000
211 date: Thu Jan 01 00:00:01 1970 +0000
195 summary: b2
212 summary: b2
196
213
197 changeset: 3:e62f78d544b4
214 changeset: 3:e62f78d544b4
198 parent: 1:3d5bf5654eda
215 parent: 1:3d5bf5654eda
199 user: test
216 user: test
200 date: Thu Jan 01 00:00:01 1970 +0000
217 date: Thu Jan 01 00:00:01 1970 +0000
201 summary: b1
218 summary: b1
202
219
203 % log -r ""
220 % log -r ""
204 abort: Ambiguous identifier!
221 abort: Ambiguous identifier!
General Comments 0
You need to be logged in to leave comments. Login now