##// END OF EJS Templates
Print less scary warning when invalidating the branch cache.
Thomas Arendsen Hein -
r6056:0ad2ffbf default
parent child Browse files
Show More
@@ -1,2081 +1,2081 b''
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms
5 # This software may be used and distributed according to the terms
6 # of the GNU General Public License, incorporated herein by reference.
6 # of the GNU General Public License, incorporated herein by reference.
7
7
8 from node import *
8 from node import *
9 from i18n import _
9 from i18n import _
10 import repo, changegroup
10 import repo, changegroup
11 import changelog, dirstate, filelog, manifest, context, weakref
11 import changelog, dirstate, filelog, manifest, context, weakref
12 import re, lock, transaction, tempfile, stat, errno, ui
12 import re, lock, transaction, tempfile, stat, errno, ui
13 import os, revlog, time, util, extensions, hook, inspect
13 import os, revlog, time, util, extensions, hook, inspect
14
14
15 class localrepository(repo.repository):
15 class localrepository(repo.repository):
16 capabilities = util.set(('lookup', 'changegroupsubset'))
16 capabilities = util.set(('lookup', 'changegroupsubset'))
17 supported = ('revlogv1', 'store')
17 supported = ('revlogv1', 'store')
18
18
19 def __init__(self, parentui, path=None, create=0):
19 def __init__(self, parentui, path=None, create=0):
20 repo.repository.__init__(self)
20 repo.repository.__init__(self)
21 self.root = os.path.realpath(path)
21 self.root = os.path.realpath(path)
22 self.path = os.path.join(self.root, ".hg")
22 self.path = os.path.join(self.root, ".hg")
23 self.origroot = path
23 self.origroot = path
24 self.opener = util.opener(self.path)
24 self.opener = util.opener(self.path)
25 self.wopener = util.opener(self.root)
25 self.wopener = util.opener(self.root)
26
26
27 if not os.path.isdir(self.path):
27 if not os.path.isdir(self.path):
28 if create:
28 if create:
29 if not os.path.exists(path):
29 if not os.path.exists(path):
30 os.mkdir(path)
30 os.mkdir(path)
31 os.mkdir(self.path)
31 os.mkdir(self.path)
32 requirements = ["revlogv1"]
32 requirements = ["revlogv1"]
33 if parentui.configbool('format', 'usestore', True):
33 if parentui.configbool('format', 'usestore', True):
34 os.mkdir(os.path.join(self.path, "store"))
34 os.mkdir(os.path.join(self.path, "store"))
35 requirements.append("store")
35 requirements.append("store")
36 # create an invalid changelog
36 # create an invalid changelog
37 self.opener("00changelog.i", "a").write(
37 self.opener("00changelog.i", "a").write(
38 '\0\0\0\2' # represents revlogv2
38 '\0\0\0\2' # represents revlogv2
39 ' dummy changelog to prevent using the old repo layout'
39 ' dummy changelog to prevent using the old repo layout'
40 )
40 )
41 reqfile = self.opener("requires", "w")
41 reqfile = self.opener("requires", "w")
42 for r in requirements:
42 for r in requirements:
43 reqfile.write("%s\n" % r)
43 reqfile.write("%s\n" % r)
44 reqfile.close()
44 reqfile.close()
45 else:
45 else:
46 raise repo.RepoError(_("repository %s not found") % path)
46 raise repo.RepoError(_("repository %s not found") % path)
47 elif create:
47 elif create:
48 raise repo.RepoError(_("repository %s already exists") % path)
48 raise repo.RepoError(_("repository %s already exists") % path)
49 else:
49 else:
50 # find requirements
50 # find requirements
51 try:
51 try:
52 requirements = self.opener("requires").read().splitlines()
52 requirements = self.opener("requires").read().splitlines()
53 except IOError, inst:
53 except IOError, inst:
54 if inst.errno != errno.ENOENT:
54 if inst.errno != errno.ENOENT:
55 raise
55 raise
56 requirements = []
56 requirements = []
57 # check them
57 # check them
58 for r in requirements:
58 for r in requirements:
59 if r not in self.supported:
59 if r not in self.supported:
60 raise repo.RepoError(_("requirement '%s' not supported") % r)
60 raise repo.RepoError(_("requirement '%s' not supported") % r)
61
61
62 # setup store
62 # setup store
63 if "store" in requirements:
63 if "store" in requirements:
64 self.encodefn = util.encodefilename
64 self.encodefn = util.encodefilename
65 self.decodefn = util.decodefilename
65 self.decodefn = util.decodefilename
66 self.spath = os.path.join(self.path, "store")
66 self.spath = os.path.join(self.path, "store")
67 else:
67 else:
68 self.encodefn = lambda x: x
68 self.encodefn = lambda x: x
69 self.decodefn = lambda x: x
69 self.decodefn = lambda x: x
70 self.spath = self.path
70 self.spath = self.path
71 self.sopener = util.encodedopener(util.opener(self.spath),
71 self.sopener = util.encodedopener(util.opener(self.spath),
72 self.encodefn)
72 self.encodefn)
73
73
74 self.ui = ui.ui(parentui=parentui)
74 self.ui = ui.ui(parentui=parentui)
75 try:
75 try:
76 self.ui.readconfig(self.join("hgrc"), self.root)
76 self.ui.readconfig(self.join("hgrc"), self.root)
77 extensions.loadall(self.ui)
77 extensions.loadall(self.ui)
78 except IOError:
78 except IOError:
79 pass
79 pass
80
80
81 self.tagscache = None
81 self.tagscache = None
82 self._tagstypecache = None
82 self._tagstypecache = None
83 self.branchcache = None
83 self.branchcache = None
84 self.nodetagscache = None
84 self.nodetagscache = None
85 self.filterpats = {}
85 self.filterpats = {}
86 self._datafilters = {}
86 self._datafilters = {}
87 self._transref = self._lockref = self._wlockref = None
87 self._transref = self._lockref = self._wlockref = None
88
88
89 def __getattr__(self, name):
89 def __getattr__(self, name):
90 if name == 'changelog':
90 if name == 'changelog':
91 self.changelog = changelog.changelog(self.sopener)
91 self.changelog = changelog.changelog(self.sopener)
92 self.sopener.defversion = self.changelog.version
92 self.sopener.defversion = self.changelog.version
93 return self.changelog
93 return self.changelog
94 if name == 'manifest':
94 if name == 'manifest':
95 self.changelog
95 self.changelog
96 self.manifest = manifest.manifest(self.sopener)
96 self.manifest = manifest.manifest(self.sopener)
97 return self.manifest
97 return self.manifest
98 if name == 'dirstate':
98 if name == 'dirstate':
99 self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
99 self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
100 return self.dirstate
100 return self.dirstate
101 else:
101 else:
102 raise AttributeError, name
102 raise AttributeError, name
103
103
104 def url(self):
104 def url(self):
105 return 'file:' + self.root
105 return 'file:' + self.root
106
106
107 def hook(self, name, throw=False, **args):
107 def hook(self, name, throw=False, **args):
108 return hook.hook(self.ui, self, name, throw, **args)
108 return hook.hook(self.ui, self, name, throw, **args)
109
109
110 tag_disallowed = ':\r\n'
110 tag_disallowed = ':\r\n'
111
111
112 def _tag(self, name, node, message, local, user, date, parent=None,
112 def _tag(self, name, node, message, local, user, date, parent=None,
113 extra={}):
113 extra={}):
114 use_dirstate = parent is None
114 use_dirstate = parent is None
115
115
116 for c in self.tag_disallowed:
116 for c in self.tag_disallowed:
117 if c in name:
117 if c in name:
118 raise util.Abort(_('%r cannot be used in a tag name') % c)
118 raise util.Abort(_('%r cannot be used in a tag name') % c)
119
119
120 self.hook('pretag', throw=True, node=hex(node), tag=name, local=local)
120 self.hook('pretag', throw=True, node=hex(node), tag=name, local=local)
121
121
122 def writetag(fp, name, munge, prevtags):
122 def writetag(fp, name, munge, prevtags):
123 fp.seek(0, 2)
123 fp.seek(0, 2)
124 if prevtags and prevtags[-1] != '\n':
124 if prevtags and prevtags[-1] != '\n':
125 fp.write('\n')
125 fp.write('\n')
126 fp.write('%s %s\n' % (hex(node), munge and munge(name) or name))
126 fp.write('%s %s\n' % (hex(node), munge and munge(name) or name))
127 fp.close()
127 fp.close()
128
128
129 prevtags = ''
129 prevtags = ''
130 if local:
130 if local:
131 try:
131 try:
132 fp = self.opener('localtags', 'r+')
132 fp = self.opener('localtags', 'r+')
133 except IOError, err:
133 except IOError, err:
134 fp = self.opener('localtags', 'a')
134 fp = self.opener('localtags', 'a')
135 else:
135 else:
136 prevtags = fp.read()
136 prevtags = fp.read()
137
137
138 # local tags are stored in the current charset
138 # local tags are stored in the current charset
139 writetag(fp, name, None, prevtags)
139 writetag(fp, name, None, prevtags)
140 self.hook('tag', node=hex(node), tag=name, local=local)
140 self.hook('tag', node=hex(node), tag=name, local=local)
141 return
141 return
142
142
143 if use_dirstate:
143 if use_dirstate:
144 try:
144 try:
145 fp = self.wfile('.hgtags', 'rb+')
145 fp = self.wfile('.hgtags', 'rb+')
146 except IOError, err:
146 except IOError, err:
147 fp = self.wfile('.hgtags', 'ab')
147 fp = self.wfile('.hgtags', 'ab')
148 else:
148 else:
149 prevtags = fp.read()
149 prevtags = fp.read()
150 else:
150 else:
151 try:
151 try:
152 prevtags = self.filectx('.hgtags', parent).data()
152 prevtags = self.filectx('.hgtags', parent).data()
153 except revlog.LookupError:
153 except revlog.LookupError:
154 pass
154 pass
155 fp = self.wfile('.hgtags', 'wb')
155 fp = self.wfile('.hgtags', 'wb')
156 if prevtags:
156 if prevtags:
157 fp.write(prevtags)
157 fp.write(prevtags)
158
158
159 # committed tags are stored in UTF-8
159 # committed tags are stored in UTF-8
160 writetag(fp, name, util.fromlocal, prevtags)
160 writetag(fp, name, util.fromlocal, prevtags)
161
161
162 if use_dirstate and '.hgtags' not in self.dirstate:
162 if use_dirstate and '.hgtags' not in self.dirstate:
163 self.add(['.hgtags'])
163 self.add(['.hgtags'])
164
164
165 tagnode = self.commit(['.hgtags'], message, user, date, p1=parent,
165 tagnode = self.commit(['.hgtags'], message, user, date, p1=parent,
166 extra=extra)
166 extra=extra)
167
167
168 self.hook('tag', node=hex(node), tag=name, local=local)
168 self.hook('tag', node=hex(node), tag=name, local=local)
169
169
170 return tagnode
170 return tagnode
171
171
172 def tag(self, name, node, message, local, user, date):
172 def tag(self, name, node, message, local, user, date):
173 '''tag a revision with a symbolic name.
173 '''tag a revision with a symbolic name.
174
174
175 if local is True, the tag is stored in a per-repository file.
175 if local is True, the tag is stored in a per-repository file.
176 otherwise, it is stored in the .hgtags file, and a new
176 otherwise, it is stored in the .hgtags file, and a new
177 changeset is committed with the change.
177 changeset is committed with the change.
178
178
179 keyword arguments:
179 keyword arguments:
180
180
181 local: whether to store tag in non-version-controlled file
181 local: whether to store tag in non-version-controlled file
182 (default False)
182 (default False)
183
183
184 message: commit message to use if committing
184 message: commit message to use if committing
185
185
186 user: name of user to use if committing
186 user: name of user to use if committing
187
187
188 date: date tuple to use if committing'''
188 date: date tuple to use if committing'''
189
189
190 for x in self.status()[:5]:
190 for x in self.status()[:5]:
191 if '.hgtags' in x:
191 if '.hgtags' in x:
192 raise util.Abort(_('working copy of .hgtags is changed '
192 raise util.Abort(_('working copy of .hgtags is changed '
193 '(please commit .hgtags manually)'))
193 '(please commit .hgtags manually)'))
194
194
195
195
196 self._tag(name, node, message, local, user, date)
196 self._tag(name, node, message, local, user, date)
197
197
198 def tags(self):
198 def tags(self):
199 '''return a mapping of tag to node'''
199 '''return a mapping of tag to node'''
200 if self.tagscache:
200 if self.tagscache:
201 return self.tagscache
201 return self.tagscache
202
202
203 globaltags = {}
203 globaltags = {}
204 tagtypes = {}
204 tagtypes = {}
205
205
206 def readtags(lines, fn, tagtype):
206 def readtags(lines, fn, tagtype):
207 filetags = {}
207 filetags = {}
208 count = 0
208 count = 0
209
209
210 def warn(msg):
210 def warn(msg):
211 self.ui.warn(_("%s, line %s: %s\n") % (fn, count, msg))
211 self.ui.warn(_("%s, line %s: %s\n") % (fn, count, msg))
212
212
213 for l in lines:
213 for l in lines:
214 count += 1
214 count += 1
215 if not l:
215 if not l:
216 continue
216 continue
217 s = l.split(" ", 1)
217 s = l.split(" ", 1)
218 if len(s) != 2:
218 if len(s) != 2:
219 warn(_("cannot parse entry"))
219 warn(_("cannot parse entry"))
220 continue
220 continue
221 node, key = s
221 node, key = s
222 key = util.tolocal(key.strip()) # stored in UTF-8
222 key = util.tolocal(key.strip()) # stored in UTF-8
223 try:
223 try:
224 bin_n = bin(node)
224 bin_n = bin(node)
225 except TypeError:
225 except TypeError:
226 warn(_("node '%s' is not well formed") % node)
226 warn(_("node '%s' is not well formed") % node)
227 continue
227 continue
228 if bin_n not in self.changelog.nodemap:
228 if bin_n not in self.changelog.nodemap:
229 warn(_("tag '%s' refers to unknown node") % key)
229 warn(_("tag '%s' refers to unknown node") % key)
230 continue
230 continue
231
231
232 h = []
232 h = []
233 if key in filetags:
233 if key in filetags:
234 n, h = filetags[key]
234 n, h = filetags[key]
235 h.append(n)
235 h.append(n)
236 filetags[key] = (bin_n, h)
236 filetags[key] = (bin_n, h)
237
237
238 for k, nh in filetags.items():
238 for k, nh in filetags.items():
239 if k not in globaltags:
239 if k not in globaltags:
240 globaltags[k] = nh
240 globaltags[k] = nh
241 tagtypes[k] = tagtype
241 tagtypes[k] = tagtype
242 continue
242 continue
243
243
244 # we prefer the global tag if:
244 # we prefer the global tag if:
245 # it supercedes us OR
245 # it supercedes us OR
246 # mutual supercedes and it has a higher rank
246 # mutual supercedes and it has a higher rank
247 # otherwise we win because we're tip-most
247 # otherwise we win because we're tip-most
248 an, ah = nh
248 an, ah = nh
249 bn, bh = globaltags[k]
249 bn, bh = globaltags[k]
250 if (bn != an and an in bh and
250 if (bn != an and an in bh and
251 (bn not in ah or len(bh) > len(ah))):
251 (bn not in ah or len(bh) > len(ah))):
252 an = bn
252 an = bn
253 ah.extend([n for n in bh if n not in ah])
253 ah.extend([n for n in bh if n not in ah])
254 globaltags[k] = an, ah
254 globaltags[k] = an, ah
255 tagtypes[k] = tagtype
255 tagtypes[k] = tagtype
256
256
257 # read the tags file from each head, ending with the tip
257 # read the tags file from each head, ending with the tip
258 f = None
258 f = None
259 for rev, node, fnode in self._hgtagsnodes():
259 for rev, node, fnode in self._hgtagsnodes():
260 f = (f and f.filectx(fnode) or
260 f = (f and f.filectx(fnode) or
261 self.filectx('.hgtags', fileid=fnode))
261 self.filectx('.hgtags', fileid=fnode))
262 readtags(f.data().splitlines(), f, "global")
262 readtags(f.data().splitlines(), f, "global")
263
263
264 try:
264 try:
265 data = util.fromlocal(self.opener("localtags").read())
265 data = util.fromlocal(self.opener("localtags").read())
266 # localtags are stored in the local character set
266 # localtags are stored in the local character set
267 # while the internal tag table is stored in UTF-8
267 # while the internal tag table is stored in UTF-8
268 readtags(data.splitlines(), "localtags", "local")
268 readtags(data.splitlines(), "localtags", "local")
269 except IOError:
269 except IOError:
270 pass
270 pass
271
271
272 self.tagscache = {}
272 self.tagscache = {}
273 self._tagstypecache = {}
273 self._tagstypecache = {}
274 for k,nh in globaltags.items():
274 for k,nh in globaltags.items():
275 n = nh[0]
275 n = nh[0]
276 if n != nullid:
276 if n != nullid:
277 self.tagscache[k] = n
277 self.tagscache[k] = n
278 self._tagstypecache[k] = tagtypes[k]
278 self._tagstypecache[k] = tagtypes[k]
279 self.tagscache['tip'] = self.changelog.tip()
279 self.tagscache['tip'] = self.changelog.tip()
280
280
281 return self.tagscache
281 return self.tagscache
282
282
283 def tagtype(self, tagname):
283 def tagtype(self, tagname):
284 '''
284 '''
285 return the type of the given tag. result can be:
285 return the type of the given tag. result can be:
286
286
287 'local' : a local tag
287 'local' : a local tag
288 'global' : a global tag
288 'global' : a global tag
289 None : tag does not exist
289 None : tag does not exist
290 '''
290 '''
291
291
292 self.tags()
292 self.tags()
293
293
294 return self._tagstypecache.get(tagname)
294 return self._tagstypecache.get(tagname)
295
295
296 def _hgtagsnodes(self):
296 def _hgtagsnodes(self):
297 heads = self.heads()
297 heads = self.heads()
298 heads.reverse()
298 heads.reverse()
299 last = {}
299 last = {}
300 ret = []
300 ret = []
301 for node in heads:
301 for node in heads:
302 c = self.changectx(node)
302 c = self.changectx(node)
303 rev = c.rev()
303 rev = c.rev()
304 try:
304 try:
305 fnode = c.filenode('.hgtags')
305 fnode = c.filenode('.hgtags')
306 except revlog.LookupError:
306 except revlog.LookupError:
307 continue
307 continue
308 ret.append((rev, node, fnode))
308 ret.append((rev, node, fnode))
309 if fnode in last:
309 if fnode in last:
310 ret[last[fnode]] = None
310 ret[last[fnode]] = None
311 last[fnode] = len(ret) - 1
311 last[fnode] = len(ret) - 1
312 return [item for item in ret if item]
312 return [item for item in ret if item]
313
313
314 def tagslist(self):
314 def tagslist(self):
315 '''return a list of tags ordered by revision'''
315 '''return a list of tags ordered by revision'''
316 l = []
316 l = []
317 for t, n in self.tags().items():
317 for t, n in self.tags().items():
318 try:
318 try:
319 r = self.changelog.rev(n)
319 r = self.changelog.rev(n)
320 except:
320 except:
321 r = -2 # sort to the beginning of the list if unknown
321 r = -2 # sort to the beginning of the list if unknown
322 l.append((r, t, n))
322 l.append((r, t, n))
323 l.sort()
323 l.sort()
324 return [(t, n) for r, t, n in l]
324 return [(t, n) for r, t, n in l]
325
325
326 def nodetags(self, node):
326 def nodetags(self, node):
327 '''return the tags associated with a node'''
327 '''return the tags associated with a node'''
328 if not self.nodetagscache:
328 if not self.nodetagscache:
329 self.nodetagscache = {}
329 self.nodetagscache = {}
330 for t, n in self.tags().items():
330 for t, n in self.tags().items():
331 self.nodetagscache.setdefault(n, []).append(t)
331 self.nodetagscache.setdefault(n, []).append(t)
332 return self.nodetagscache.get(node, [])
332 return self.nodetagscache.get(node, [])
333
333
334 def _branchtags(self):
334 def _branchtags(self):
335 partial, last, lrev = self._readbranchcache()
335 partial, last, lrev = self._readbranchcache()
336
336
337 tiprev = self.changelog.count() - 1
337 tiprev = self.changelog.count() - 1
338 if lrev != tiprev:
338 if lrev != tiprev:
339 self._updatebranchcache(partial, lrev+1, tiprev+1)
339 self._updatebranchcache(partial, lrev+1, tiprev+1)
340 self._writebranchcache(partial, self.changelog.tip(), tiprev)
340 self._writebranchcache(partial, self.changelog.tip(), tiprev)
341
341
342 return partial
342 return partial
343
343
344 def branchtags(self):
344 def branchtags(self):
345 if self.branchcache is not None:
345 if self.branchcache is not None:
346 return self.branchcache
346 return self.branchcache
347
347
348 self.branchcache = {} # avoid recursion in changectx
348 self.branchcache = {} # avoid recursion in changectx
349 partial = self._branchtags()
349 partial = self._branchtags()
350
350
351 # the branch cache is stored on disk as UTF-8, but in the local
351 # the branch cache is stored on disk as UTF-8, but in the local
352 # charset internally
352 # charset internally
353 for k, v in partial.items():
353 for k, v in partial.items():
354 self.branchcache[util.tolocal(k)] = v
354 self.branchcache[util.tolocal(k)] = v
355 return self.branchcache
355 return self.branchcache
356
356
357 def _readbranchcache(self):
357 def _readbranchcache(self):
358 partial = {}
358 partial = {}
359 try:
359 try:
360 f = self.opener("branch.cache")
360 f = self.opener("branch.cache")
361 lines = f.read().split('\n')
361 lines = f.read().split('\n')
362 f.close()
362 f.close()
363 except (IOError, OSError):
363 except (IOError, OSError):
364 return {}, nullid, nullrev
364 return {}, nullid, nullrev
365
365
366 try:
366 try:
367 last, lrev = lines.pop(0).split(" ", 1)
367 last, lrev = lines.pop(0).split(" ", 1)
368 last, lrev = bin(last), int(lrev)
368 last, lrev = bin(last), int(lrev)
369 if not (lrev < self.changelog.count() and
369 if not (lrev < self.changelog.count() and
370 self.changelog.node(lrev) == last): # sanity check
370 self.changelog.node(lrev) == last): # sanity check
371 # invalidate the cache
371 # invalidate the cache
372 raise ValueError('Invalid branch cache: unknown tip')
372 raise ValueError('invalidating branch cache (tip differs)')
373 for l in lines:
373 for l in lines:
374 if not l: continue
374 if not l: continue
375 node, label = l.split(" ", 1)
375 node, label = l.split(" ", 1)
376 partial[label.strip()] = bin(node)
376 partial[label.strip()] = bin(node)
377 except (KeyboardInterrupt, util.SignalInterrupt):
377 except (KeyboardInterrupt, util.SignalInterrupt):
378 raise
378 raise
379 except Exception, inst:
379 except Exception, inst:
380 if self.ui.debugflag:
380 if self.ui.debugflag:
381 self.ui.warn(str(inst), '\n')
381 self.ui.warn(str(inst), '\n')
382 partial, last, lrev = {}, nullid, nullrev
382 partial, last, lrev = {}, nullid, nullrev
383 return partial, last, lrev
383 return partial, last, lrev
384
384
385 def _writebranchcache(self, branches, tip, tiprev):
385 def _writebranchcache(self, branches, tip, tiprev):
386 try:
386 try:
387 f = self.opener("branch.cache", "w", atomictemp=True)
387 f = self.opener("branch.cache", "w", atomictemp=True)
388 f.write("%s %s\n" % (hex(tip), tiprev))
388 f.write("%s %s\n" % (hex(tip), tiprev))
389 for label, node in branches.iteritems():
389 for label, node in branches.iteritems():
390 f.write("%s %s\n" % (hex(node), label))
390 f.write("%s %s\n" % (hex(node), label))
391 f.rename()
391 f.rename()
392 except (IOError, OSError):
392 except (IOError, OSError):
393 pass
393 pass
394
394
395 def _updatebranchcache(self, partial, start, end):
395 def _updatebranchcache(self, partial, start, end):
396 for r in xrange(start, end):
396 for r in xrange(start, end):
397 c = self.changectx(r)
397 c = self.changectx(r)
398 b = c.branch()
398 b = c.branch()
399 partial[b] = c.node()
399 partial[b] = c.node()
400
400
401 def lookup(self, key):
401 def lookup(self, key):
402 if key == '.':
402 if key == '.':
403 key, second = self.dirstate.parents()
403 key, second = self.dirstate.parents()
404 if key == nullid:
404 if key == nullid:
405 raise repo.RepoError(_("no revision checked out"))
405 raise repo.RepoError(_("no revision checked out"))
406 if second != nullid:
406 if second != nullid:
407 self.ui.warn(_("warning: working directory has two parents, "
407 self.ui.warn(_("warning: working directory has two parents, "
408 "tag '.' uses the first\n"))
408 "tag '.' uses the first\n"))
409 elif key == 'null':
409 elif key == 'null':
410 return nullid
410 return nullid
411 n = self.changelog._match(key)
411 n = self.changelog._match(key)
412 if n:
412 if n:
413 return n
413 return n
414 if key in self.tags():
414 if key in self.tags():
415 return self.tags()[key]
415 return self.tags()[key]
416 if key in self.branchtags():
416 if key in self.branchtags():
417 return self.branchtags()[key]
417 return self.branchtags()[key]
418 n = self.changelog._partialmatch(key)
418 n = self.changelog._partialmatch(key)
419 if n:
419 if n:
420 return n
420 return n
421 try:
421 try:
422 if len(key) == 20:
422 if len(key) == 20:
423 key = hex(key)
423 key = hex(key)
424 except:
424 except:
425 pass
425 pass
426 raise repo.RepoError(_("unknown revision '%s'") % key)
426 raise repo.RepoError(_("unknown revision '%s'") % key)
427
427
428 def dev(self):
428 def dev(self):
429 return os.lstat(self.path).st_dev
429 return os.lstat(self.path).st_dev
430
430
431 def local(self):
431 def local(self):
432 return True
432 return True
433
433
434 def join(self, f):
434 def join(self, f):
435 return os.path.join(self.path, f)
435 return os.path.join(self.path, f)
436
436
437 def sjoin(self, f):
437 def sjoin(self, f):
438 f = self.encodefn(f)
438 f = self.encodefn(f)
439 return os.path.join(self.spath, f)
439 return os.path.join(self.spath, f)
440
440
441 def wjoin(self, f):
441 def wjoin(self, f):
442 return os.path.join(self.root, f)
442 return os.path.join(self.root, f)
443
443
444 def file(self, f):
444 def file(self, f):
445 if f[0] == '/':
445 if f[0] == '/':
446 f = f[1:]
446 f = f[1:]
447 return filelog.filelog(self.sopener, f)
447 return filelog.filelog(self.sopener, f)
448
448
449 def changectx(self, changeid=None):
449 def changectx(self, changeid=None):
450 return context.changectx(self, changeid)
450 return context.changectx(self, changeid)
451
451
452 def workingctx(self):
452 def workingctx(self):
453 return context.workingctx(self)
453 return context.workingctx(self)
454
454
455 def parents(self, changeid=None):
455 def parents(self, changeid=None):
456 '''
456 '''
457 get list of changectxs for parents of changeid or working directory
457 get list of changectxs for parents of changeid or working directory
458 '''
458 '''
459 if changeid is None:
459 if changeid is None:
460 pl = self.dirstate.parents()
460 pl = self.dirstate.parents()
461 else:
461 else:
462 n = self.changelog.lookup(changeid)
462 n = self.changelog.lookup(changeid)
463 pl = self.changelog.parents(n)
463 pl = self.changelog.parents(n)
464 if pl[1] == nullid:
464 if pl[1] == nullid:
465 return [self.changectx(pl[0])]
465 return [self.changectx(pl[0])]
466 return [self.changectx(pl[0]), self.changectx(pl[1])]
466 return [self.changectx(pl[0]), self.changectx(pl[1])]
467
467
468 def filectx(self, path, changeid=None, fileid=None):
468 def filectx(self, path, changeid=None, fileid=None):
469 """changeid can be a changeset revision, node, or tag.
469 """changeid can be a changeset revision, node, or tag.
470 fileid can be a file revision or node."""
470 fileid can be a file revision or node."""
471 return context.filectx(self, path, changeid, fileid)
471 return context.filectx(self, path, changeid, fileid)
472
472
473 def getcwd(self):
473 def getcwd(self):
474 return self.dirstate.getcwd()
474 return self.dirstate.getcwd()
475
475
476 def pathto(self, f, cwd=None):
476 def pathto(self, f, cwd=None):
477 return self.dirstate.pathto(f, cwd)
477 return self.dirstate.pathto(f, cwd)
478
478
479 def wfile(self, f, mode='r'):
479 def wfile(self, f, mode='r'):
480 return self.wopener(f, mode)
480 return self.wopener(f, mode)
481
481
482 def _link(self, f):
482 def _link(self, f):
483 return os.path.islink(self.wjoin(f))
483 return os.path.islink(self.wjoin(f))
484
484
485 def _filter(self, filter, filename, data):
485 def _filter(self, filter, filename, data):
486 if filter not in self.filterpats:
486 if filter not in self.filterpats:
487 l = []
487 l = []
488 for pat, cmd in self.ui.configitems(filter):
488 for pat, cmd in self.ui.configitems(filter):
489 mf = util.matcher(self.root, "", [pat], [], [])[1]
489 mf = util.matcher(self.root, "", [pat], [], [])[1]
490 fn = None
490 fn = None
491 for name, filterfn in self._datafilters.iteritems():
491 for name, filterfn in self._datafilters.iteritems():
492 if cmd.startswith(name):
492 if cmd.startswith(name):
493 fn = filterfn
493 fn = filterfn
494 break
494 break
495 if not fn:
495 if not fn:
496 fn = lambda s, c, **kwargs: util.filter(s, c)
496 fn = lambda s, c, **kwargs: util.filter(s, c)
497 # Wrap old filters not supporting keyword arguments
497 # Wrap old filters not supporting keyword arguments
498 if not inspect.getargspec(fn)[2]:
498 if not inspect.getargspec(fn)[2]:
499 oldfn = fn
499 oldfn = fn
500 fn = lambda s, c, **kwargs: oldfn(s, c)
500 fn = lambda s, c, **kwargs: oldfn(s, c)
501 l.append((mf, fn, cmd))
501 l.append((mf, fn, cmd))
502 self.filterpats[filter] = l
502 self.filterpats[filter] = l
503
503
504 for mf, fn, cmd in self.filterpats[filter]:
504 for mf, fn, cmd in self.filterpats[filter]:
505 if mf(filename):
505 if mf(filename):
506 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
506 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
507 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
507 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
508 break
508 break
509
509
510 return data
510 return data
511
511
512 def adddatafilter(self, name, filter):
512 def adddatafilter(self, name, filter):
513 self._datafilters[name] = filter
513 self._datafilters[name] = filter
514
514
515 def wread(self, filename):
515 def wread(self, filename):
516 if self._link(filename):
516 if self._link(filename):
517 data = os.readlink(self.wjoin(filename))
517 data = os.readlink(self.wjoin(filename))
518 else:
518 else:
519 data = self.wopener(filename, 'r').read()
519 data = self.wopener(filename, 'r').read()
520 return self._filter("encode", filename, data)
520 return self._filter("encode", filename, data)
521
521
522 def wwrite(self, filename, data, flags):
522 def wwrite(self, filename, data, flags):
523 data = self._filter("decode", filename, data)
523 data = self._filter("decode", filename, data)
524 try:
524 try:
525 os.unlink(self.wjoin(filename))
525 os.unlink(self.wjoin(filename))
526 except OSError:
526 except OSError:
527 pass
527 pass
528 self.wopener(filename, 'w').write(data)
528 self.wopener(filename, 'w').write(data)
529 util.set_flags(self.wjoin(filename), flags)
529 util.set_flags(self.wjoin(filename), flags)
530
530
531 def wwritedata(self, filename, data):
531 def wwritedata(self, filename, data):
532 return self._filter("decode", filename, data)
532 return self._filter("decode", filename, data)
533
533
534 def transaction(self):
534 def transaction(self):
535 if self._transref and self._transref():
535 if self._transref and self._transref():
536 return self._transref().nest()
536 return self._transref().nest()
537
537
538 # abort here if the journal already exists
538 # abort here if the journal already exists
539 if os.path.exists(self.sjoin("journal")):
539 if os.path.exists(self.sjoin("journal")):
540 raise repo.RepoError(_("journal already exists - run hg recover"))
540 raise repo.RepoError(_("journal already exists - run hg recover"))
541
541
542 # save dirstate for rollback
542 # save dirstate for rollback
543 try:
543 try:
544 ds = self.opener("dirstate").read()
544 ds = self.opener("dirstate").read()
545 except IOError:
545 except IOError:
546 ds = ""
546 ds = ""
547 self.opener("journal.dirstate", "w").write(ds)
547 self.opener("journal.dirstate", "w").write(ds)
548 self.opener("journal.branch", "w").write(self.dirstate.branch())
548 self.opener("journal.branch", "w").write(self.dirstate.branch())
549
549
550 renames = [(self.sjoin("journal"), self.sjoin("undo")),
550 renames = [(self.sjoin("journal"), self.sjoin("undo")),
551 (self.join("journal.dirstate"), self.join("undo.dirstate")),
551 (self.join("journal.dirstate"), self.join("undo.dirstate")),
552 (self.join("journal.branch"), self.join("undo.branch"))]
552 (self.join("journal.branch"), self.join("undo.branch"))]
553 tr = transaction.transaction(self.ui.warn, self.sopener,
553 tr = transaction.transaction(self.ui.warn, self.sopener,
554 self.sjoin("journal"),
554 self.sjoin("journal"),
555 aftertrans(renames))
555 aftertrans(renames))
556 self._transref = weakref.ref(tr)
556 self._transref = weakref.ref(tr)
557 return tr
557 return tr
558
558
559 def recover(self):
559 def recover(self):
560 l = self.lock()
560 l = self.lock()
561 try:
561 try:
562 if os.path.exists(self.sjoin("journal")):
562 if os.path.exists(self.sjoin("journal")):
563 self.ui.status(_("rolling back interrupted transaction\n"))
563 self.ui.status(_("rolling back interrupted transaction\n"))
564 transaction.rollback(self.sopener, self.sjoin("journal"))
564 transaction.rollback(self.sopener, self.sjoin("journal"))
565 self.invalidate()
565 self.invalidate()
566 return True
566 return True
567 else:
567 else:
568 self.ui.warn(_("no interrupted transaction available\n"))
568 self.ui.warn(_("no interrupted transaction available\n"))
569 return False
569 return False
570 finally:
570 finally:
571 del l
571 del l
572
572
573 def rollback(self):
573 def rollback(self):
574 wlock = lock = None
574 wlock = lock = None
575 try:
575 try:
576 wlock = self.wlock()
576 wlock = self.wlock()
577 lock = self.lock()
577 lock = self.lock()
578 if os.path.exists(self.sjoin("undo")):
578 if os.path.exists(self.sjoin("undo")):
579 self.ui.status(_("rolling back last transaction\n"))
579 self.ui.status(_("rolling back last transaction\n"))
580 transaction.rollback(self.sopener, self.sjoin("undo"))
580 transaction.rollback(self.sopener, self.sjoin("undo"))
581 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
581 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
582 branch = self.opener("undo.branch").read()
582 branch = self.opener("undo.branch").read()
583 self.dirstate.setbranch(branch)
583 self.dirstate.setbranch(branch)
584 self.invalidate()
584 self.invalidate()
585 self.dirstate.invalidate()
585 self.dirstate.invalidate()
586 else:
586 else:
587 self.ui.warn(_("no rollback information available\n"))
587 self.ui.warn(_("no rollback information available\n"))
588 finally:
588 finally:
589 del lock, wlock
589 del lock, wlock
590
590
591 def invalidate(self):
591 def invalidate(self):
592 for a in "changelog manifest".split():
592 for a in "changelog manifest".split():
593 if hasattr(self, a):
593 if hasattr(self, a):
594 self.__delattr__(a)
594 self.__delattr__(a)
595 self.tagscache = None
595 self.tagscache = None
596 self._tagstypecache = None
596 self._tagstypecache = None
597 self.nodetagscache = None
597 self.nodetagscache = None
598
598
599 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
599 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
600 try:
600 try:
601 l = lock.lock(lockname, 0, releasefn, desc=desc)
601 l = lock.lock(lockname, 0, releasefn, desc=desc)
602 except lock.LockHeld, inst:
602 except lock.LockHeld, inst:
603 if not wait:
603 if not wait:
604 raise
604 raise
605 self.ui.warn(_("waiting for lock on %s held by %r\n") %
605 self.ui.warn(_("waiting for lock on %s held by %r\n") %
606 (desc, inst.locker))
606 (desc, inst.locker))
607 # default to 600 seconds timeout
607 # default to 600 seconds timeout
608 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
608 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
609 releasefn, desc=desc)
609 releasefn, desc=desc)
610 if acquirefn:
610 if acquirefn:
611 acquirefn()
611 acquirefn()
612 return l
612 return l
613
613
614 def lock(self, wait=True):
614 def lock(self, wait=True):
615 if self._lockref and self._lockref():
615 if self._lockref and self._lockref():
616 return self._lockref()
616 return self._lockref()
617
617
618 l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
618 l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
619 _('repository %s') % self.origroot)
619 _('repository %s') % self.origroot)
620 self._lockref = weakref.ref(l)
620 self._lockref = weakref.ref(l)
621 return l
621 return l
622
622
623 def wlock(self, wait=True):
623 def wlock(self, wait=True):
624 if self._wlockref and self._wlockref():
624 if self._wlockref and self._wlockref():
625 return self._wlockref()
625 return self._wlockref()
626
626
627 l = self._lock(self.join("wlock"), wait, self.dirstate.write,
627 l = self._lock(self.join("wlock"), wait, self.dirstate.write,
628 self.dirstate.invalidate, _('working directory of %s') %
628 self.dirstate.invalidate, _('working directory of %s') %
629 self.origroot)
629 self.origroot)
630 self._wlockref = weakref.ref(l)
630 self._wlockref = weakref.ref(l)
631 return l
631 return l
632
632
    def filecommit(self, fn, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction

        fn: repo-relative path of the file being committed
        manifest1, manifest2: manifests of the two commit parents
        linkrev: changelog revision the new filelog entry will link to
        tr: active transaction (proxy)
        changelist: list of changed files; fn is appended when a new
            filelog revision is actually created

        Returns the filelog node for this file in the new changeset
        (the existing parent node when the file is unchanged and has no
        copy metadata).
        """

        t = self.wread(fn)
        fl = self.file(fn)
        # filelog parents, nullid when the file is absent from a parent
        fp1 = manifest1.get(fn, nullid)
        fp2 = manifest2.get(fn, nullid)

        meta = {}
        cp = self.dirstate.copied(fn)
        if cp:
            # Mark the new revision of this file as a copy of another
            # file.  This copy data will effectively act as a parent
            # of this new revision.  If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent.  For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                 should record that bar descends from
            #                 bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4   as the merge base
            #
            meta["copy"] = cp
            if not manifest2: # not a branch merge
                meta["copyrev"] = hex(manifest1.get(cp, nullid))
                fp2 = nullid
            elif fp2 != nullid: # copied on remote side
                meta["copyrev"] = hex(manifest1.get(cp, nullid))
            elif fp1 != nullid: # copied on local side, reversed
                # NOTE(review): manifest2.get(cp) has no default here, so
                # cp is presumably guaranteed to be in manifest2 on this
                # path — hex(None) would fail otherwise
                meta["copyrev"] = hex(manifest2.get(cp))
                fp2 = fp1
            elif cp in manifest2: # directory rename on local side
                meta["copyrev"] = hex(manifest2[cp])
            else: # directory rename on remote side
                meta["copyrev"] = hex(manifest1.get(cp, nullid))
            self.ui.debug(_(" %s: copy %s:%s\n") %
                          (fn, cp, meta["copyrev"]))
            # with copy metadata, the first parent is always null; the
            # copy source acts as the effective parent
            fp1 = nullid
        elif fp2 != nullid:
            # is one parent an ancestor of the other?
            fpa = fl.ancestor(fp1, fp2)
            if fpa == fp1:
                fp1, fp2 = fp2, nullid
            elif fpa == fp2:
                fp2 = nullid

        # is the file unmodified from the parent? report existing entry
        if fp2 == nullid and not fl.cmp(fp1, t) and not meta:
            return fp1

        changelist.append(fn)
        return fl.add(t, meta, tr, linkrev, fp1, fp2)
694
694
695 def rawcommit(self, files, text, user, date, p1=None, p2=None, extra={}):
695 def rawcommit(self, files, text, user, date, p1=None, p2=None, extra={}):
696 if p1 is None:
696 if p1 is None:
697 p1, p2 = self.dirstate.parents()
697 p1, p2 = self.dirstate.parents()
698 return self.commit(files=files, text=text, user=user, date=date,
698 return self.commit(files=files, text=text, user=user, date=date,
699 p1=p1, p2=p2, extra=extra, empty_ok=True)
699 p1=p1, p2=p2, extra=extra, empty_ok=True)
700
700
    def commit(self, files=None, text="", user=None, date=None,
               match=util.always, force=False, force_editor=False,
               p1=None, p2=None, extra={}, empty_ok=False):
        """Create a new changeset and return its node.

        files: explicit list of files to commit; when empty/None, all
            changes reported by status() are committed
        text: commit message (an editor is spawned when empty unless
            empty_ok is set, or always when force_editor is set)
        match: filter used when no explicit file list is given
        force: commit even when nothing changed
        p1, p2: explicit parents; p1 is None for a normal (dirstate
            based) commit, non-None for the rawcommit path
        extra: extra changeset metadata (copied, not mutated)

        Returns the new changeset node, or None when there was nothing
        to commit.
        """
        wlock = lock = tr = None
        valid = 0 # don't save the dirstate if this isn't set
        if files:
            files = util.unique(files)
        try:
            commit = []
            remove = []
            changed = []
            use_dirstate = (p1 is None) # not rawcommit
            extra = extra.copy()

            # figure out which files to commit and which to remove
            if use_dirstate:
                if files:
                    for f in files:
                        s = self.dirstate[f]
                        if s in 'nma':
                            commit.append(f)
                        elif s == 'r':
                            remove.append(f)
                        else:
                            self.ui.warn(_("%s not tracked!\n") % f)
                else:
                    changes = self.status(match=match)[:5]
                    modified, added, removed, deleted, unknown = changes
                    commit = modified + added
                    remove = removed
            else:
                commit = files

            if use_dirstate:
                p1, p2 = self.dirstate.parents()
                update_dirstate = True
            else:
                p1, p2 = p1, p2 or nullid
                update_dirstate = (self.dirstate.parents()[0] == p1)

            c1 = self.changelog.read(p1)
            c2 = self.changelog.read(p2)
            m1 = self.manifest.read(c1[0]).copy()
            m2 = self.manifest.read(c2[0])

            if use_dirstate:
                branchname = self.workingctx().branch()
                try:
                    # round-trip to validate the branch name is UTF-8
                    branchname = branchname.decode('UTF-8').encode('UTF-8')
                except UnicodeDecodeError:
                    raise util.Abort(_('branch name not in UTF-8!'))
            else:
                branchname = ""

            if use_dirstate:
                oldname = c1[5].get("branch") # stored in UTF-8
                if (not commit and not remove and not force and p2 == nullid
                    and branchname == oldname):
                    self.ui.status(_("nothing changed\n"))
                    return None

            xp1 = hex(p1)
            if p2 == nullid: xp2 = ''
            else: xp2 = hex(p2)

            self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)

            wlock = self.wlock()
            lock = self.lock()
            tr = self.transaction()
            # a weak proxy so holding it in closures below does not keep
            # the transaction alive past 'del tr'
            trp = weakref.proxy(tr)

            # check in files
            new = {}
            linkrev = self.changelog.count()
            commit.sort()
            is_exec = util.execfunc(self.root, m1.execf)
            is_link = util.linkfunc(self.root, m1.linkf)
            for f in commit:
                self.ui.note(f + "\n")
                try:
                    new[f] = self.filecommit(f, m1, m2, linkrev, trp, changed)
                    new_exec = is_exec(f)
                    new_link = is_link(f)
                    if ((not changed or changed[-1] != f) and
                        m2.get(f) != new[f]):
                        # mention the file in the changelog if some
                        # flag changed, even if there was no content
                        # change.
                        old_exec = m1.execf(f)
                        old_link = m1.linkf(f)
                        if old_exec != new_exec or old_link != new_link:
                            changed.append(f)
                    m1.set(f, new_exec, new_link)
                    if use_dirstate:
                        self.dirstate.normal(f)

                except (OSError, IOError):
                    if use_dirstate:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    else:
                        # rawcommit path: a missing file means "remove"
                        remove.append(f)

            # update manifest
            m1.update(new)
            remove.sort()
            removed = []

            for f in remove:
                if f in m1:
                    del m1[f]
                    removed.append(f)
                elif f in m2:
                    removed.append(f)
            mn = self.manifest.add(m1, trp, linkrev, c1[0], c2[0],
                                   (new, removed))

            # add changeset
            new = new.keys()
            new.sort()

            user = user or self.ui.username()
            if (not empty_ok and not text) or force_editor:
                # build the template shown in the editor
                edittext = []
                if text:
                    edittext.append(text)
                edittext.append("")
                edittext.append(_("HG: Enter commit message."
                                  " Lines beginning with 'HG:' are removed."))
                edittext.append("HG: --")
                edittext.append("HG: user: %s" % user)
                if p2 != nullid:
                    edittext.append("HG: branch merge")
                if branchname:
                    edittext.append("HG: branch '%s'" % util.tolocal(branchname))
                edittext.extend(["HG: changed %s" % f for f in changed])
                edittext.extend(["HG: removed %s" % f for f in removed])
                if not changed and not remove:
                    edittext.append("HG: no files changed")
                edittext.append("")
                # run editor in the repository root
                olddir = os.getcwd()
                os.chdir(self.root)
                text = self.ui.edit("\n".join(edittext), user)
                os.chdir(olddir)

            if branchname:
                extra["branch"] = branchname

            if use_dirstate:
                # strip trailing whitespace and leading blank lines;
                # refuse an effectively empty message
                lines = [line.rstrip() for line in text.rstrip().splitlines()]
                while lines and not lines[0]:
                    del lines[0]
                if not lines:
                    raise util.Abort(_("empty commit message"))
                text = '\n'.join(lines)

            n = self.changelog.add(mn, changed + removed, text, trp, p1, p2,
                                   user, date, extra)
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2)
            tr.close()

            # keep the in-memory branch cache up to date
            if self.branchcache and "branch" in extra:
                self.branchcache[util.tolocal(extra["branch"])] = n

            if use_dirstate or update_dirstate:
                self.dirstate.setparents(n)
                if use_dirstate:
                    for f in removed:
                        self.dirstate.forget(f)
                valid = 1 # our dirstate updates are complete

            self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
            return n
        finally:
            if not valid: # don't save our updated dirstate
                self.dirstate.invalidate()
            del tr, lock, wlock
880
880
    def walk(self, node=None, files=[], match=util.always, badmatch=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function

        results are yielded in a tuple (src, filename), where src
        is one of:
        'f' the file was found in the directory tree
        'm' the file was only in the dirstate and not in the tree
        'b' file was not found and matched badmatch

        When node is given, the walk runs over that changeset's
        manifest; otherwise it is delegated to dirstate.walk.
        '''

        if node:
            # track the requested names that have not matched anything yet
            fdict = dict.fromkeys(files)
            # for dirstate.walk, files=['.'] means "walk the whole tree".
            # follow that here, too
            fdict.pop('.', None)
            mdict = self.manifest.read(self.changelog.read(node)[0])
            mfiles = mdict.keys()
            mfiles.sort()
            for fn in mfiles:
                for ffn in fdict:
                    # match if the file is the exact name or a directory
                    if ffn == fn or fn.startswith("%s/" % ffn):
                        # deleting during iteration is safe here because
                        # we break out of the loop immediately
                        del fdict[ffn]
                        break
                if match(fn):
                    yield 'm', fn
            # whatever is left in fdict did not exist in the manifest
            ffiles = fdict.keys()
            ffiles.sort()
            for fn in ffiles:
                if badmatch and badmatch(fn):
                    if match(fn):
                        yield 'b', fn
                else:
                    self.ui.warn(_('%s: No such file in rev %s\n')
                                 % (self.pathto(fn), short(node)))
        else:
            for src, fn in self.dirstate.walk(files, match, badmatch=badmatch):
                yield src, fn
922
922
    def status(self, node1=None, node2=None, files=[], match=util.always,
               list_ignored=False, list_clean=False):
        """return status of files between two nodes or node and working directory

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.

        Returns a 7-tuple of sorted file lists:
        (modified, added, removed, deleted, unknown, ignored, clean);
        ignored and clean are only populated when the corresponding
        list_* flag is set.
        """

        def fcmp(fn, getnode):
            # compare working-copy contents against the stored revision
            t1 = self.wread(fn)
            return self.file(fn).cmp(getnode(fn), t1)

        def mfmatches(node):
            # manifest of 'node' restricted to files accepted by 'match'
            change = self.changelog.read(node)
            mf = self.manifest.read(change[0]).copy()
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        modified, added, removed, deleted, unknown = [], [], [], [], []
        ignored, clean = [], []

        compareworking = False
        if not node1 or (not node2 and node1 == self.dirstate.parents()[0]):
            compareworking = True

        if not compareworking:
            # read the manifest from node1 before the manifest from node2,
            # so that we'll hit the manifest cache if we're going through
            # all the revisions in parent->child order.
            mf1 = mfmatches(node1)

        # are we comparing the working directory?
        if not node2:
            (lookup, modified, added, removed, deleted, unknown,
             ignored, clean) = self.dirstate.status(files, match,
                                                    list_ignored, list_clean)

            # are we comparing working dir against its parent?
            if compareworking:
                if lookup:
                    fixup = []
                    # do a full compare of any files that might have changed
                    ctx = self.changectx()
                    for f in lookup:
                        if f not in ctx or ctx[f].cmp(self.wread(f)):
                            modified.append(f)
                        else:
                            fixup.append(f)
                            if list_clean:
                                clean.append(f)

                    # update dirstate for files that are actually clean
                    if fixup:
                        wlock = None
                        try:
                            try:
                                # best effort: don't block status on the
                                # working-directory lock
                                wlock = self.wlock(False)
                            except lock.LockException:
                                pass
                            if wlock:
                                for f in fixup:
                                    self.dirstate.normal(f)
                        finally:
                            del wlock
            else:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                # XXX: create it in dirstate.py ?
                mf2 = mfmatches(self.dirstate.parents()[0])
                is_exec = util.execfunc(self.root, mf2.execf)
                is_link = util.linkfunc(self.root, mf2.linkf)
                for f in lookup + modified + added:
                    # empty node means "contents must be compared on disk"
                    mf2[f] = ""
                    mf2.set(f, is_exec(f), is_link(f))
                for f in removed:
                    if f in mf2:
                        del mf2[f]

        else:
            # we are comparing two revisions
            mf2 = mfmatches(node2)

        if not compareworking:
            # flush lists from dirstate before comparing manifests
            modified, added, clean = [], [], []

            # make sure to sort the files so we talk to the disk in a
            # reasonable order
            mf2keys = mf2.keys()
            mf2keys.sort()
            getnode = lambda fn: mf1.get(fn, nullid)
            for fn in mf2keys:
                if fn in mf1:
                    # modified when flags differ or the contents differ
                    # (falling back to a disk compare for working-dir
                    # entries, whose node is the empty placeholder)
                    if (mf1.flags(fn) != mf2.flags(fn) or
                        (mf1[fn] != mf2[fn] and
                         (mf2[fn] != "" or fcmp(fn, getnode)))):
                        modified.append(fn)
                    elif list_clean:
                        clean.append(fn)
                    del mf1[fn]
                else:
                    added.append(fn)

            # anything left in mf1 was not seen in mf2: removed
            removed = mf1.keys()

        # sort and return results:
        for l in modified, added, removed, deleted, unknown, ignored, clean:
            l.sort()
        return (modified, added, removed, deleted, unknown, ignored, clean)
1034
1034
1035 def add(self, list):
1035 def add(self, list):
1036 wlock = self.wlock()
1036 wlock = self.wlock()
1037 try:
1037 try:
1038 rejected = []
1038 rejected = []
1039 for f in list:
1039 for f in list:
1040 p = self.wjoin(f)
1040 p = self.wjoin(f)
1041 try:
1041 try:
1042 st = os.lstat(p)
1042 st = os.lstat(p)
1043 except:
1043 except:
1044 self.ui.warn(_("%s does not exist!\n") % f)
1044 self.ui.warn(_("%s does not exist!\n") % f)
1045 rejected.append(f)
1045 rejected.append(f)
1046 continue
1046 continue
1047 if st.st_size > 10000000:
1047 if st.st_size > 10000000:
1048 self.ui.warn(_("%s: files over 10MB may cause memory and"
1048 self.ui.warn(_("%s: files over 10MB may cause memory and"
1049 " performance problems\n"
1049 " performance problems\n"
1050 "(use 'hg revert %s' to unadd the file)\n")
1050 "(use 'hg revert %s' to unadd the file)\n")
1051 % (f, f))
1051 % (f, f))
1052 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1052 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1053 self.ui.warn(_("%s not added: only files and symlinks "
1053 self.ui.warn(_("%s not added: only files and symlinks "
1054 "supported currently\n") % f)
1054 "supported currently\n") % f)
1055 rejected.append(p)
1055 rejected.append(p)
1056 elif self.dirstate[f] in 'amn':
1056 elif self.dirstate[f] in 'amn':
1057 self.ui.warn(_("%s already tracked!\n") % f)
1057 self.ui.warn(_("%s already tracked!\n") % f)
1058 elif self.dirstate[f] == 'r':
1058 elif self.dirstate[f] == 'r':
1059 self.dirstate.normallookup(f)
1059 self.dirstate.normallookup(f)
1060 else:
1060 else:
1061 self.dirstate.add(f)
1061 self.dirstate.add(f)
1062 return rejected
1062 return rejected
1063 finally:
1063 finally:
1064 del wlock
1064 del wlock
1065
1065
1066 def forget(self, list):
1066 def forget(self, list):
1067 wlock = self.wlock()
1067 wlock = self.wlock()
1068 try:
1068 try:
1069 for f in list:
1069 for f in list:
1070 if self.dirstate[f] != 'a':
1070 if self.dirstate[f] != 'a':
1071 self.ui.warn(_("%s not added!\n") % f)
1071 self.ui.warn(_("%s not added!\n") % f)
1072 else:
1072 else:
1073 self.dirstate.forget(f)
1073 self.dirstate.forget(f)
1074 finally:
1074 finally:
1075 del wlock
1075 del wlock
1076
1076
1077 def remove(self, list, unlink=False):
1077 def remove(self, list, unlink=False):
1078 wlock = None
1078 wlock = None
1079 try:
1079 try:
1080 if unlink:
1080 if unlink:
1081 for f in list:
1081 for f in list:
1082 try:
1082 try:
1083 util.unlink(self.wjoin(f))
1083 util.unlink(self.wjoin(f))
1084 except OSError, inst:
1084 except OSError, inst:
1085 if inst.errno != errno.ENOENT:
1085 if inst.errno != errno.ENOENT:
1086 raise
1086 raise
1087 wlock = self.wlock()
1087 wlock = self.wlock()
1088 for f in list:
1088 for f in list:
1089 if unlink and os.path.exists(self.wjoin(f)):
1089 if unlink and os.path.exists(self.wjoin(f)):
1090 self.ui.warn(_("%s still exists!\n") % f)
1090 self.ui.warn(_("%s still exists!\n") % f)
1091 elif self.dirstate[f] == 'a':
1091 elif self.dirstate[f] == 'a':
1092 self.dirstate.forget(f)
1092 self.dirstate.forget(f)
1093 elif f not in self.dirstate:
1093 elif f not in self.dirstate:
1094 self.ui.warn(_("%s not tracked!\n") % f)
1094 self.ui.warn(_("%s not tracked!\n") % f)
1095 else:
1095 else:
1096 self.dirstate.remove(f)
1096 self.dirstate.remove(f)
1097 finally:
1097 finally:
1098 del wlock
1098 del wlock
1099
1099
1100 def undelete(self, list):
1100 def undelete(self, list):
1101 wlock = None
1101 wlock = None
1102 try:
1102 try:
1103 manifests = [self.manifest.read(self.changelog.read(p)[0])
1103 manifests = [self.manifest.read(self.changelog.read(p)[0])
1104 for p in self.dirstate.parents() if p != nullid]
1104 for p in self.dirstate.parents() if p != nullid]
1105 wlock = self.wlock()
1105 wlock = self.wlock()
1106 for f in list:
1106 for f in list:
1107 if self.dirstate[f] != 'r':
1107 if self.dirstate[f] != 'r':
1108 self.ui.warn("%s not removed!\n" % f)
1108 self.ui.warn("%s not removed!\n" % f)
1109 else:
1109 else:
1110 m = f in manifests[0] and manifests[0] or manifests[1]
1110 m = f in manifests[0] and manifests[0] or manifests[1]
1111 t = self.file(f).read(m[f])
1111 t = self.file(f).read(m[f])
1112 self.wwrite(f, t, m.flags(f))
1112 self.wwrite(f, t, m.flags(f))
1113 self.dirstate.normal(f)
1113 self.dirstate.normal(f)
1114 finally:
1114 finally:
1115 del wlock
1115 del wlock
1116
1116
1117 def copy(self, source, dest):
1117 def copy(self, source, dest):
1118 wlock = None
1118 wlock = None
1119 try:
1119 try:
1120 p = self.wjoin(dest)
1120 p = self.wjoin(dest)
1121 if not (os.path.exists(p) or os.path.islink(p)):
1121 if not (os.path.exists(p) or os.path.islink(p)):
1122 self.ui.warn(_("%s does not exist!\n") % dest)
1122 self.ui.warn(_("%s does not exist!\n") % dest)
1123 elif not (os.path.isfile(p) or os.path.islink(p)):
1123 elif not (os.path.isfile(p) or os.path.islink(p)):
1124 self.ui.warn(_("copy failed: %s is not a file or a "
1124 self.ui.warn(_("copy failed: %s is not a file or a "
1125 "symbolic link\n") % dest)
1125 "symbolic link\n") % dest)
1126 else:
1126 else:
1127 wlock = self.wlock()
1127 wlock = self.wlock()
1128 if dest not in self.dirstate:
1128 if dest not in self.dirstate:
1129 self.dirstate.add(dest)
1129 self.dirstate.add(dest)
1130 self.dirstate.copy(source, dest)
1130 self.dirstate.copy(source, dest)
1131 finally:
1131 finally:
1132 del wlock
1132 del wlock
1133
1133
1134 def heads(self, start=None):
1134 def heads(self, start=None):
1135 heads = self.changelog.heads(start)
1135 heads = self.changelog.heads(start)
1136 # sort the output in rev descending order
1136 # sort the output in rev descending order
1137 heads = [(-self.changelog.rev(h), h) for h in heads]
1137 heads = [(-self.changelog.rev(h), h) for h in heads]
1138 heads.sort()
1138 heads.sort()
1139 return [n for (r, n) in heads]
1139 return [n for (r, n) in heads]
1140
1140
    def branchheads(self, branch, start=None):
        """Return the heads (as changelog nodes) of the named branch.

        If start is given, only heads reachable from start are kept.
        An unknown branch name yields an empty list.
        """
        branches = self.branchtags()
        if branch not in branches:
            return []
        # The basic algorithm is this:
        #
        # Start from the branch tip since there are no later revisions that can
        # possibly be in this branch, and the tip is a guaranteed head.
        #
        # Remember the tip's parents as the first ancestors, since these by
        # definition are not heads.
        #
        # Step backwards from the branch tip through all the revisions. We are
        # guaranteed by the rules of Mercurial that we will now be visiting the
        # nodes in reverse topological order (children before parents).
        #
        # If a revision is one of the ancestors of a head then we can toss it
        # out of the ancestors set (we've already found it and won't be
        # visiting it again) and put its parents in the ancestors set.
        #
        # Otherwise, if a revision is in the branch it's another head, since it
        # wasn't in the ancestor list of an existing head. So add it to the
        # head list, and add its parents to the ancestor list.
        #
        # If it is not in the branch ignore it.
        #
        # Once we have a list of heads, use nodesbetween to filter out all the
        # heads that cannot be reached from startrev. There may be a more
        # efficient way to do this as part of the previous algorithm.

        set = util.set
        heads = [self.changelog.rev(branches[branch])]
        # Don't care if ancestors contains nullrev or not.
        ancestors = set(self.changelog.parentrevs(heads[0]))
        for rev in xrange(heads[0] - 1, nullrev, -1):
            if rev in ancestors:
                # known non-head: replace it with its parents
                ancestors.update(self.changelog.parentrevs(rev))
                ancestors.remove(rev)
            elif self.changectx(rev).branch() == branch:
                # in the branch and not an ancestor of any known head:
                # it is a new head
                heads.append(rev)
                ancestors.update(self.changelog.parentrevs(rev))
        heads = [self.changelog.node(rev) for rev in heads]
        if start is not None:
            # keep only heads reachable from start
            heads = self.changelog.nodesbetween([start], heads)[2]
        return heads
1186
1186
1187 def branches(self, nodes):
1187 def branches(self, nodes):
1188 if not nodes:
1188 if not nodes:
1189 nodes = [self.changelog.tip()]
1189 nodes = [self.changelog.tip()]
1190 b = []
1190 b = []
1191 for n in nodes:
1191 for n in nodes:
1192 t = n
1192 t = n
1193 while 1:
1193 while 1:
1194 p = self.changelog.parents(n)
1194 p = self.changelog.parents(n)
1195 if p[1] != nullid or p[0] == nullid:
1195 if p[1] != nullid or p[0] == nullid:
1196 b.append((t, n, p[0], p[1]))
1196 b.append((t, n, p[0], p[1]))
1197 break
1197 break
1198 n = p[0]
1198 n = p[0]
1199 return b
1199 return b
1200
1200
1201 def between(self, pairs):
1201 def between(self, pairs):
1202 r = []
1202 r = []
1203
1203
1204 for top, bottom in pairs:
1204 for top, bottom in pairs:
1205 n, l, i = top, [], 0
1205 n, l, i = top, [], 0
1206 f = 1
1206 f = 1
1207
1207
1208 while n != bottom:
1208 while n != bottom:
1209 p = self.changelog.parents(n)[0]
1209 p = self.changelog.parents(n)[0]
1210 if i == f:
1210 if i == f:
1211 l.append(n)
1211 l.append(n)
1212 f = f * 2
1212 f = f * 2
1213 n = p
1213 n = p
1214 i += 1
1214 i += 1
1215
1215
1216 r.append(l)
1216 r.append(l)
1217
1217
1218 return r
1218 return r
1219
1219
    def findincoming(self, remote, base=None, heads=None, force=False):
        """Return list of roots of the subsets of missing nodes from remote

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side and that no child of a node of base exists
        in both remote and self.
        Furthermore base will be updated to include the nodes that exists
        in self and remote but no children exists in self and remote.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads.

        All the ancestors of base are in self and in remote.
        All the descendants of the list returned are missing in self.
        (and so we know that the rest of the nodes are missing in remote, see
        outgoing)
        """
        m = self.changelog.nodemap
        search = []
        fetch = {}
        seen = {}
        seenbranch = {}
        if base == None:
            base = {}

        if not heads:
            heads = remote.heads()

        if self.changelog.tip() == nullid:
            # local repo is empty: everything the remote has is missing
            base[nullid] = 1
            if heads != [nullid]:
                return [nullid]
            return []

        # assume we're closer to the tip than the root
        # and start by examining the heads
        self.ui.status(_("searching for changes\n"))

        unknown = []
        for h in heads:
            if h not in m:
                unknown.append(h)
            else:
                base[h] = 1

        if not unknown:
            # every remote head is known locally: nothing is missing
            return []

        # req tracks nodes we have already asked the remote about
        req = dict.fromkeys(unknown)
        reqcnt = 0

        # search through remote branches
        # a 'branch' here is a linear segment of history, with four parts:
        # head, root, first parent, second parent
        # (a branch always has two parents (or none) by definition)
        unknown = remote.branches(unknown)
        while unknown:
            r = []
            while unknown:
                n = unknown.pop(0)
                if n[0] in seen:
                    continue

                self.ui.debug(_("examining %s:%s\n")
                              % (short(n[0]), short(n[1])))
                if n[0] == nullid: # found the end of the branch
                    pass
                elif n in seenbranch:
                    self.ui.debug(_("branch already found\n"))
                    continue
                elif n[1] and n[1] in m: # do we know the base?
                    self.ui.debug(_("found incomplete branch %s:%s\n")
                                  % (short(n[0]), short(n[1])))
                    search.append(n) # schedule branch range for scanning
                    seenbranch[n] = 1
                else:
                    if n[1] not in seen and n[1] not in fetch:
                        if n[2] in m and n[3] in m:
                            self.ui.debug(_("found new changeset %s\n") %
                                          short(n[1]))
                            fetch[n[1]] = 1 # earliest unknown
                        for p in n[2:4]:
                            if p in m:
                                base[p] = 1 # latest known

                    # queue unknown parents for the next remote request
                    for p in n[2:4]:
                        if p not in req and p not in m:
                            r.append(p)
                            req[p] = 1
                seen[n[0]] = 1

            if r:
                # ask the remote about the still-unknown parents,
                # ten branches per request
                reqcnt += 1
                self.ui.debug(_("request %d: %s\n") %
                            (reqcnt, " ".join(map(short, r))))
                for p in xrange(0, len(r), 10):
                    for b in remote.branches(r[p:p+10]):
                        self.ui.debug(_("received %s:%s\n") %
                                      (short(b[0]), short(b[1])))
                        unknown.append(b)

        # do binary search on the branches we found
        while search:
            n = search.pop(0)
            reqcnt += 1
            l = remote.between([(n[0], n[1])])[0]
            l.append(n[1])
            p = n[0]
            f = 1
            for i in l:
                self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
                if i in m:
                    if f <= 2:
                        # gap of at most two: p is the earliest unknown
                        self.ui.debug(_("found new branch changeset %s\n") %
                                          short(p))
                        fetch[p] = 1
                        base[i] = 1
                    else:
                        # still a wide gap: narrow the range and retry
                        self.ui.debug(_("narrowed branch search to %s:%s\n")
                                      % (short(p), short(i)))
                        search.append((p, i))
                    break
                p, f = i, f * 2

        # sanity check our fetch list
        for f in fetch.keys():
            if f in m:
                raise repo.RepoError(_("already have changeset ") + short(f[:4]))

        if base.keys() == [nullid]:
            # no common ancestry with the remote at all
            if force:
                self.ui.warn(_("warning: repository is unrelated\n"))
            else:
                raise util.Abort(_("repository is unrelated"))

        self.ui.debug(_("found new changesets starting at ") +
                      " ".join([short(f) for f in fetch]) + "\n")

        self.ui.debug(_("%d total queries\n") % reqcnt)

        return fetch.keys()
1360
1360
    def findoutgoing(self, remote, base=None, heads=None, force=False):
        """Return list of nodes that are roots of subsets not in remote

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads, and return a second element which
        contains all remote heads which get new children.
        """
        if base == None:
            base = {}
        # always run discovery so base gets filled in / updated
        self.findincoming(remote, base, heads, force=force)

        self.ui.debug(_("common changesets up to ")
                      + " ".join(map(short, base.keys())) + "\n")

        # start from every local node, then prune what the remote has
        remain = dict.fromkeys(self.changelog.nodemap)

        # prune everything remote has from the tree
        del remain[nullid]
        remove = base.keys()
        while remove:
            n = remove.pop(0)
            if n in remain:
                del remain[n]
                for p in self.changelog.parents(n):
                    remove.append(p)

        # find every node whose parents have been pruned
        subset = []
        # find every remote head that will get new children
        updated_heads = {}
        for n in remain:
            p1, p2 = self.changelog.parents(n)
            if p1 not in remain and p2 not in remain:
                subset.append(n)
            if heads:
                if p1 in heads:
                    updated_heads[p1] = True
                if p2 in heads:
                    updated_heads[p2] = True

        # this is the set of all roots we have to push
        if heads:
            return subset, updated_heads.keys()
        else:
            return subset
1408
1408
    def pull(self, remote, heads=None, force=False):
        """Pull changes from remote into this repository.

        heads restricts the pull to the given remote heads (this
        requires the changegroupsubset capability on the remote).
        Returns the result of addchangegroup, or 0 when there is
        nothing to pull.
        """
        lock = self.lock()
        try:
            fetch = self.findincoming(remote, heads=heads, force=force)
            if fetch == [nullid]:
                # local repo is empty: everything will be transferred
                self.ui.status(_("requesting all changes\n"))

            if not fetch:
                self.ui.status(_("no changes found\n"))
                return 0

            if heads is None:
                cg = remote.changegroup(fetch, 'pull')
            else:
                if 'changegroupsubset' not in remote.capabilities:
                    raise util.Abort(_("Partial pull cannot be done because other repository doesn't support changegroupsubset."))
                cg = remote.changegroupsubset(fetch, heads, 'pull')
            return self.addchangegroup(cg, 'pull', remote.url())
        finally:
            # deleting the reference releases the repository lock
            del lock
1429
1429
1430 def push(self, remote, force=False, revs=None):
1430 def push(self, remote, force=False, revs=None):
1431 # there are two ways to push to remote repo:
1431 # there are two ways to push to remote repo:
1432 #
1432 #
1433 # addchangegroup assumes local user can lock remote
1433 # addchangegroup assumes local user can lock remote
1434 # repo (local filesystem, old ssh servers).
1434 # repo (local filesystem, old ssh servers).
1435 #
1435 #
1436 # unbundle assumes local user cannot lock remote repo (new ssh
1436 # unbundle assumes local user cannot lock remote repo (new ssh
1437 # servers, http servers).
1437 # servers, http servers).
1438
1438
1439 if remote.capable('unbundle'):
1439 if remote.capable('unbundle'):
1440 return self.push_unbundle(remote, force, revs)
1440 return self.push_unbundle(remote, force, revs)
1441 return self.push_addchangegroup(remote, force, revs)
1441 return self.push_addchangegroup(remote, force, revs)
1442
1442
    def prepush(self, remote, force, revs):
        """Compute the changegroup to push and vet the push.

        Returns (changegroup, remote_heads) when there is something to
        push, or (None, error_code) when there is nothing to push or
        the push would create new remote heads without force.
        """
        base = {}
        remote_heads = remote.heads()
        # inc is non-empty when the remote has changes we do not have
        inc = self.findincoming(remote, base, remote_heads, force=force)

        update, updated_heads = self.findoutgoing(remote, base, remote_heads)
        if revs is not None:
            msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
        else:
            bases, heads = update, self.changelog.heads()

        if not bases:
            self.ui.status(_("no changes found\n"))
            return None, 1
        elif not force:
            # check if we're creating new remote heads
            # to be a remote head after push, node must be either
            # - unknown locally
            # - a local outgoing head descended from update
            # - a remote head that's known locally and not
            #   ancestral to an outgoing head

            warn = 0

            if remote_heads == [nullid]:
                # pushing into an empty repo cannot add heads
                warn = 0
            elif not revs and len(heads) > len(remote_heads):
                warn = 1
            else:
                newheads = list(heads)
                for r in remote_heads:
                    if r in self.changelog.nodemap:
                        desc = self.changelog.heads(r, heads)
                        l = [h for h in heads if h in desc]
                        if not l:
                            # remote head not ancestral to any outgoing
                            # head: it survives the push
                            newheads.append(r)
                    else:
                        # unknown remote head: assume it survives
                        newheads.append(r)
                if len(newheads) > len(remote_heads):
                    warn = 1

            if warn:
                self.ui.warn(_("abort: push creates new remote branches!\n"))
                self.ui.status(_("(did you forget to merge?"
                                 " use push -f to force)\n"))
                return None, 1
            elif inc:
                self.ui.warn(_("note: unsynced remote changes!\n"))


        if revs is None:
            cg = self.changegroup(update, 'push')
        else:
            cg = self.changegroupsubset(update, revs, 'push')
        return cg, remote_heads
1498
1498
1499 def push_addchangegroup(self, remote, force, revs):
1499 def push_addchangegroup(self, remote, force, revs):
1500 lock = remote.lock()
1500 lock = remote.lock()
1501 try:
1501 try:
1502 ret = self.prepush(remote, force, revs)
1502 ret = self.prepush(remote, force, revs)
1503 if ret[0] is not None:
1503 if ret[0] is not None:
1504 cg, remote_heads = ret
1504 cg, remote_heads = ret
1505 return remote.addchangegroup(cg, 'push', self.url())
1505 return remote.addchangegroup(cg, 'push', self.url())
1506 return ret[1]
1506 return ret[1]
1507 finally:
1507 finally:
1508 del lock
1508 del lock
1509
1509
1510 def push_unbundle(self, remote, force, revs):
1510 def push_unbundle(self, remote, force, revs):
1511 # local repo finds heads on server, finds out what revs it
1511 # local repo finds heads on server, finds out what revs it
1512 # must push. once revs transferred, if server finds it has
1512 # must push. once revs transferred, if server finds it has
1513 # different heads (someone else won commit/push race), server
1513 # different heads (someone else won commit/push race), server
1514 # aborts.
1514 # aborts.
1515
1515
1516 ret = self.prepush(remote, force, revs)
1516 ret = self.prepush(remote, force, revs)
1517 if ret[0] is not None:
1517 if ret[0] is not None:
1518 cg, remote_heads = ret
1518 cg, remote_heads = ret
1519 if force: remote_heads = ['force']
1519 if force: remote_heads = ['force']
1520 return remote.unbundle(cg, remote_heads, 'push')
1520 return remote.unbundle(cg, remote_heads, 'push')
1521 return ret[1]
1521 return ret[1]
1522
1522
1523 def changegroupinfo(self, nodes, source):
1523 def changegroupinfo(self, nodes, source):
1524 if self.ui.verbose or source == 'bundle':
1524 if self.ui.verbose or source == 'bundle':
1525 self.ui.status(_("%d changesets found\n") % len(nodes))
1525 self.ui.status(_("%d changesets found\n") % len(nodes))
1526 if self.ui.debugflag:
1526 if self.ui.debugflag:
1527 self.ui.debug(_("List of changesets:\n"))
1527 self.ui.debug(_("List of changesets:\n"))
1528 for node in nodes:
1528 for node in nodes:
1529 self.ui.debug("%s\n" % hex(node))
1529 self.ui.debug("%s\n" % hex(node))
1530
1530
1531 def changegroupsubset(self, bases, heads, source, extranodes=None):
1531 def changegroupsubset(self, bases, heads, source, extranodes=None):
1532 """This function generates a changegroup consisting of all the nodes
1532 """This function generates a changegroup consisting of all the nodes
1533 that are descendents of any of the bases, and ancestors of any of
1533 that are descendents of any of the bases, and ancestors of any of
1534 the heads.
1534 the heads.
1535
1535
1536 It is fairly complex as determining which filenodes and which
1536 It is fairly complex as determining which filenodes and which
1537 manifest nodes need to be included for the changeset to be complete
1537 manifest nodes need to be included for the changeset to be complete
1538 is non-trivial.
1538 is non-trivial.
1539
1539
1540 Another wrinkle is doing the reverse, figuring out which changeset in
1540 Another wrinkle is doing the reverse, figuring out which changeset in
1541 the changegroup a particular filenode or manifestnode belongs to.
1541 the changegroup a particular filenode or manifestnode belongs to.
1542
1542
1543 The caller can specify some nodes that must be included in the
1543 The caller can specify some nodes that must be included in the
1544 changegroup using the extranodes argument. It should be a dict
1544 changegroup using the extranodes argument. It should be a dict
1545 where the keys are the filenames (or 1 for the manifest), and the
1545 where the keys are the filenames (or 1 for the manifest), and the
1546 values are lists of (node, linknode) tuples, where node is a wanted
1546 values are lists of (node, linknode) tuples, where node is a wanted
1547 node and linknode is the changelog node that should be transmitted as
1547 node and linknode is the changelog node that should be transmitted as
1548 the linkrev.
1548 the linkrev.
1549 """
1549 """
1550
1550
1551 self.hook('preoutgoing', throw=True, source=source)
1551 self.hook('preoutgoing', throw=True, source=source)
1552
1552
1553 # Set up some initial variables
1553 # Set up some initial variables
1554 # Make it easy to refer to self.changelog
1554 # Make it easy to refer to self.changelog
1555 cl = self.changelog
1555 cl = self.changelog
1556 # msng is short for missing - compute the list of changesets in this
1556 # msng is short for missing - compute the list of changesets in this
1557 # changegroup.
1557 # changegroup.
1558 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1558 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1559 self.changegroupinfo(msng_cl_lst, source)
1559 self.changegroupinfo(msng_cl_lst, source)
1560 # Some bases may turn out to be superfluous, and some heads may be
1560 # Some bases may turn out to be superfluous, and some heads may be
1561 # too. nodesbetween will return the minimal set of bases and heads
1561 # too. nodesbetween will return the minimal set of bases and heads
1562 # necessary to re-create the changegroup.
1562 # necessary to re-create the changegroup.
1563
1563
1564 # Known heads are the list of heads that it is assumed the recipient
1564 # Known heads are the list of heads that it is assumed the recipient
1565 # of this changegroup will know about.
1565 # of this changegroup will know about.
1566 knownheads = {}
1566 knownheads = {}
1567 # We assume that all parents of bases are known heads.
1567 # We assume that all parents of bases are known heads.
1568 for n in bases:
1568 for n in bases:
1569 for p in cl.parents(n):
1569 for p in cl.parents(n):
1570 if p != nullid:
1570 if p != nullid:
1571 knownheads[p] = 1
1571 knownheads[p] = 1
1572 knownheads = knownheads.keys()
1572 knownheads = knownheads.keys()
1573 if knownheads:
1573 if knownheads:
1574 # Now that we know what heads are known, we can compute which
1574 # Now that we know what heads are known, we can compute which
1575 # changesets are known. The recipient must know about all
1575 # changesets are known. The recipient must know about all
1576 # changesets required to reach the known heads from the null
1576 # changesets required to reach the known heads from the null
1577 # changeset.
1577 # changeset.
1578 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1578 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1579 junk = None
1579 junk = None
1580 # Transform the list into an ersatz set.
1580 # Transform the list into an ersatz set.
1581 has_cl_set = dict.fromkeys(has_cl_set)
1581 has_cl_set = dict.fromkeys(has_cl_set)
1582 else:
1582 else:
1583 # If there were no known heads, the recipient cannot be assumed to
1583 # If there were no known heads, the recipient cannot be assumed to
1584 # know about any changesets.
1584 # know about any changesets.
1585 has_cl_set = {}
1585 has_cl_set = {}
1586
1586
1587 # Make it easy to refer to self.manifest
1587 # Make it easy to refer to self.manifest
1588 mnfst = self.manifest
1588 mnfst = self.manifest
1589 # We don't know which manifests are missing yet
1589 # We don't know which manifests are missing yet
1590 msng_mnfst_set = {}
1590 msng_mnfst_set = {}
1591 # Nor do we know which filenodes are missing.
1591 # Nor do we know which filenodes are missing.
1592 msng_filenode_set = {}
1592 msng_filenode_set = {}
1593
1593
1594 junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
1594 junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
1595 junk = None
1595 junk = None
1596
1596
1597 # A changeset always belongs to itself, so the changenode lookup
1597 # A changeset always belongs to itself, so the changenode lookup
1598 # function for a changenode is identity.
1598 # function for a changenode is identity.
1599 def identity(x):
1599 def identity(x):
1600 return x
1600 return x
1601
1601
1602 # A function generating function. Sets up an environment for the
1602 # A function generating function. Sets up an environment for the
1603 # inner function.
1603 # inner function.
1604 def cmp_by_rev_func(revlog):
1604 def cmp_by_rev_func(revlog):
1605 # Compare two nodes by their revision number in the environment's
1605 # Compare two nodes by their revision number in the environment's
1606 # revision history. Since the revision number both represents the
1606 # revision history. Since the revision number both represents the
1607 # most efficient order to read the nodes in, and represents a
1607 # most efficient order to read the nodes in, and represents a
1608 # topological sorting of the nodes, this function is often useful.
1608 # topological sorting of the nodes, this function is often useful.
1609 def cmp_by_rev(a, b):
1609 def cmp_by_rev(a, b):
1610 return cmp(revlog.rev(a), revlog.rev(b))
1610 return cmp(revlog.rev(a), revlog.rev(b))
1611 return cmp_by_rev
1611 return cmp_by_rev
1612
1612
1613 # If we determine that a particular file or manifest node must be a
1613 # If we determine that a particular file or manifest node must be a
1614 # node that the recipient of the changegroup will already have, we can
1614 # node that the recipient of the changegroup will already have, we can
1615 # also assume the recipient will have all the parents. This function
1615 # also assume the recipient will have all the parents. This function
1616 # prunes them from the set of missing nodes.
1616 # prunes them from the set of missing nodes.
1617 def prune_parents(revlog, hasset, msngset):
1617 def prune_parents(revlog, hasset, msngset):
1618 haslst = hasset.keys()
1618 haslst = hasset.keys()
1619 haslst.sort(cmp_by_rev_func(revlog))
1619 haslst.sort(cmp_by_rev_func(revlog))
1620 for node in haslst:
1620 for node in haslst:
1621 parentlst = [p for p in revlog.parents(node) if p != nullid]
1621 parentlst = [p for p in revlog.parents(node) if p != nullid]
1622 while parentlst:
1622 while parentlst:
1623 n = parentlst.pop()
1623 n = parentlst.pop()
1624 if n not in hasset:
1624 if n not in hasset:
1625 hasset[n] = 1
1625 hasset[n] = 1
1626 p = [p for p in revlog.parents(n) if p != nullid]
1626 p = [p for p in revlog.parents(n) if p != nullid]
1627 parentlst.extend(p)
1627 parentlst.extend(p)
1628 for n in hasset:
1628 for n in hasset:
1629 msngset.pop(n, None)
1629 msngset.pop(n, None)
1630
1630
1631 # This is a function generating function used to set up an environment
1631 # This is a function generating function used to set up an environment
1632 # for the inner function to execute in.
1632 # for the inner function to execute in.
1633 def manifest_and_file_collector(changedfileset):
1633 def manifest_and_file_collector(changedfileset):
1634 # This is an information gathering function that gathers
1634 # This is an information gathering function that gathers
1635 # information from each changeset node that goes out as part of
1635 # information from each changeset node that goes out as part of
1636 # the changegroup. The information gathered is a list of which
1636 # the changegroup. The information gathered is a list of which
1637 # manifest nodes are potentially required (the recipient may
1637 # manifest nodes are potentially required (the recipient may
1638 # already have them) and total list of all files which were
1638 # already have them) and total list of all files which were
1639 # changed in any changeset in the changegroup.
1639 # changed in any changeset in the changegroup.
1640 #
1640 #
1641 # We also remember the first changenode we saw any manifest
1641 # We also remember the first changenode we saw any manifest
1642 # referenced by so we can later determine which changenode 'owns'
1642 # referenced by so we can later determine which changenode 'owns'
1643 # the manifest.
1643 # the manifest.
1644 def collect_manifests_and_files(clnode):
1644 def collect_manifests_and_files(clnode):
1645 c = cl.read(clnode)
1645 c = cl.read(clnode)
1646 for f in c[3]:
1646 for f in c[3]:
1647 # This is to make sure we only have one instance of each
1647 # This is to make sure we only have one instance of each
1648 # filename string for each filename.
1648 # filename string for each filename.
1649 changedfileset.setdefault(f, f)
1649 changedfileset.setdefault(f, f)
1650 msng_mnfst_set.setdefault(c[0], clnode)
1650 msng_mnfst_set.setdefault(c[0], clnode)
1651 return collect_manifests_and_files
1651 return collect_manifests_and_files
1652
1652
1653 # Figure out which manifest nodes (of the ones we think might be part
1653 # Figure out which manifest nodes (of the ones we think might be part
1654 # of the changegroup) the recipient must know about and remove them
1654 # of the changegroup) the recipient must know about and remove them
1655 # from the changegroup.
1655 # from the changegroup.
1656 def prune_manifests():
1656 def prune_manifests():
1657 has_mnfst_set = {}
1657 has_mnfst_set = {}
1658 for n in msng_mnfst_set:
1658 for n in msng_mnfst_set:
1659 # If a 'missing' manifest thinks it belongs to a changenode
1659 # If a 'missing' manifest thinks it belongs to a changenode
1660 # the recipient is assumed to have, obviously the recipient
1660 # the recipient is assumed to have, obviously the recipient
1661 # must have that manifest.
1661 # must have that manifest.
1662 linknode = cl.node(mnfst.linkrev(n))
1662 linknode = cl.node(mnfst.linkrev(n))
1663 if linknode in has_cl_set:
1663 if linknode in has_cl_set:
1664 has_mnfst_set[n] = 1
1664 has_mnfst_set[n] = 1
1665 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1665 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1666
1666
1667 # Use the information collected in collect_manifests_and_files to say
1667 # Use the information collected in collect_manifests_and_files to say
1668 # which changenode any manifestnode belongs to.
1668 # which changenode any manifestnode belongs to.
1669 def lookup_manifest_link(mnfstnode):
1669 def lookup_manifest_link(mnfstnode):
1670 return msng_mnfst_set[mnfstnode]
1670 return msng_mnfst_set[mnfstnode]
1671
1671
1672 # A function generating function that sets up the initial environment
1672 # A function generating function that sets up the initial environment
1673 # the inner function.
1673 # the inner function.
1674 def filenode_collector(changedfiles):
1674 def filenode_collector(changedfiles):
1675 next_rev = [0]
1675 next_rev = [0]
1676 # This gathers information from each manifestnode included in the
1676 # This gathers information from each manifestnode included in the
1677 # changegroup about which filenodes the manifest node references
1677 # changegroup about which filenodes the manifest node references
1678 # so we can include those in the changegroup too.
1678 # so we can include those in the changegroup too.
1679 #
1679 #
1680 # It also remembers which changenode each filenode belongs to. It
1680 # It also remembers which changenode each filenode belongs to. It
1681 # does this by assuming the a filenode belongs to the changenode
1681 # does this by assuming the a filenode belongs to the changenode
1682 # the first manifest that references it belongs to.
1682 # the first manifest that references it belongs to.
1683 def collect_msng_filenodes(mnfstnode):
1683 def collect_msng_filenodes(mnfstnode):
1684 r = mnfst.rev(mnfstnode)
1684 r = mnfst.rev(mnfstnode)
1685 if r == next_rev[0]:
1685 if r == next_rev[0]:
1686 # If the last rev we looked at was the one just previous,
1686 # If the last rev we looked at was the one just previous,
1687 # we only need to see a diff.
1687 # we only need to see a diff.
1688 deltamf = mnfst.readdelta(mnfstnode)
1688 deltamf = mnfst.readdelta(mnfstnode)
1689 # For each line in the delta
1689 # For each line in the delta
1690 for f, fnode in deltamf.items():
1690 for f, fnode in deltamf.items():
1691 f = changedfiles.get(f, None)
1691 f = changedfiles.get(f, None)
1692 # And if the file is in the list of files we care
1692 # And if the file is in the list of files we care
1693 # about.
1693 # about.
1694 if f is not None:
1694 if f is not None:
1695 # Get the changenode this manifest belongs to
1695 # Get the changenode this manifest belongs to
1696 clnode = msng_mnfst_set[mnfstnode]
1696 clnode = msng_mnfst_set[mnfstnode]
1697 # Create the set of filenodes for the file if
1697 # Create the set of filenodes for the file if
1698 # there isn't one already.
1698 # there isn't one already.
1699 ndset = msng_filenode_set.setdefault(f, {})
1699 ndset = msng_filenode_set.setdefault(f, {})
1700 # And set the filenode's changelog node to the
1700 # And set the filenode's changelog node to the
1701 # manifest's if it hasn't been set already.
1701 # manifest's if it hasn't been set already.
1702 ndset.setdefault(fnode, clnode)
1702 ndset.setdefault(fnode, clnode)
1703 else:
1703 else:
1704 # Otherwise we need a full manifest.
1704 # Otherwise we need a full manifest.
1705 m = mnfst.read(mnfstnode)
1705 m = mnfst.read(mnfstnode)
1706 # For every file in we care about.
1706 # For every file in we care about.
1707 for f in changedfiles:
1707 for f in changedfiles:
1708 fnode = m.get(f, None)
1708 fnode = m.get(f, None)
1709 # If it's in the manifest
1709 # If it's in the manifest
1710 if fnode is not None:
1710 if fnode is not None:
1711 # See comments above.
1711 # See comments above.
1712 clnode = msng_mnfst_set[mnfstnode]
1712 clnode = msng_mnfst_set[mnfstnode]
1713 ndset = msng_filenode_set.setdefault(f, {})
1713 ndset = msng_filenode_set.setdefault(f, {})
1714 ndset.setdefault(fnode, clnode)
1714 ndset.setdefault(fnode, clnode)
1715 # Remember the revision we hope to see next.
1715 # Remember the revision we hope to see next.
1716 next_rev[0] = r + 1
1716 next_rev[0] = r + 1
1717 return collect_msng_filenodes
1717 return collect_msng_filenodes
1718
1718
1719 # We have a list of filenodes we think we need for a file, lets remove
1719 # We have a list of filenodes we think we need for a file, lets remove
1720 # all those we now the recipient must have.
1720 # all those we now the recipient must have.
1721 def prune_filenodes(f, filerevlog):
1721 def prune_filenodes(f, filerevlog):
1722 msngset = msng_filenode_set[f]
1722 msngset = msng_filenode_set[f]
1723 hasset = {}
1723 hasset = {}
1724 # If a 'missing' filenode thinks it belongs to a changenode we
1724 # If a 'missing' filenode thinks it belongs to a changenode we
1725 # assume the recipient must have, then the recipient must have
1725 # assume the recipient must have, then the recipient must have
1726 # that filenode.
1726 # that filenode.
1727 for n in msngset:
1727 for n in msngset:
1728 clnode = cl.node(filerevlog.linkrev(n))
1728 clnode = cl.node(filerevlog.linkrev(n))
1729 if clnode in has_cl_set:
1729 if clnode in has_cl_set:
1730 hasset[n] = 1
1730 hasset[n] = 1
1731 prune_parents(filerevlog, hasset, msngset)
1731 prune_parents(filerevlog, hasset, msngset)
1732
1732
1733 # A function generator function that sets up the a context for the
1733 # A function generator function that sets up the a context for the
1734 # inner function.
1734 # inner function.
1735 def lookup_filenode_link_func(fname):
1735 def lookup_filenode_link_func(fname):
1736 msngset = msng_filenode_set[fname]
1736 msngset = msng_filenode_set[fname]
1737 # Lookup the changenode the filenode belongs to.
1737 # Lookup the changenode the filenode belongs to.
1738 def lookup_filenode_link(fnode):
1738 def lookup_filenode_link(fnode):
1739 return msngset[fnode]
1739 return msngset[fnode]
1740 return lookup_filenode_link
1740 return lookup_filenode_link
1741
1741
1742 # Add the nodes that were explicitly requested.
1742 # Add the nodes that were explicitly requested.
1743 def add_extra_nodes(name, nodes):
1743 def add_extra_nodes(name, nodes):
1744 if not extranodes or name not in extranodes:
1744 if not extranodes or name not in extranodes:
1745 return
1745 return
1746
1746
1747 for node, linknode in extranodes[name]:
1747 for node, linknode in extranodes[name]:
1748 if node not in nodes:
1748 if node not in nodes:
1749 nodes[node] = linknode
1749 nodes[node] = linknode
1750
1750
1751 # Now that we have all theses utility functions to help out and
1751 # Now that we have all theses utility functions to help out and
1752 # logically divide up the task, generate the group.
1752 # logically divide up the task, generate the group.
1753 def gengroup():
1753 def gengroup():
1754 # The set of changed files starts empty.
1754 # The set of changed files starts empty.
1755 changedfiles = {}
1755 changedfiles = {}
1756 # Create a changenode group generator that will call our functions
1756 # Create a changenode group generator that will call our functions
1757 # back to lookup the owning changenode and collect information.
1757 # back to lookup the owning changenode and collect information.
1758 group = cl.group(msng_cl_lst, identity,
1758 group = cl.group(msng_cl_lst, identity,
1759 manifest_and_file_collector(changedfiles))
1759 manifest_and_file_collector(changedfiles))
1760 for chnk in group:
1760 for chnk in group:
1761 yield chnk
1761 yield chnk
1762
1762
1763 # The list of manifests has been collected by the generator
1763 # The list of manifests has been collected by the generator
1764 # calling our functions back.
1764 # calling our functions back.
1765 prune_manifests()
1765 prune_manifests()
1766 add_extra_nodes(1, msng_mnfst_set)
1766 add_extra_nodes(1, msng_mnfst_set)
1767 msng_mnfst_lst = msng_mnfst_set.keys()
1767 msng_mnfst_lst = msng_mnfst_set.keys()
1768 # Sort the manifestnodes by revision number.
1768 # Sort the manifestnodes by revision number.
1769 msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
1769 msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
1770 # Create a generator for the manifestnodes that calls our lookup
1770 # Create a generator for the manifestnodes that calls our lookup
1771 # and data collection functions back.
1771 # and data collection functions back.
1772 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1772 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1773 filenode_collector(changedfiles))
1773 filenode_collector(changedfiles))
1774 for chnk in group:
1774 for chnk in group:
1775 yield chnk
1775 yield chnk
1776
1776
1777 # These are no longer needed, dereference and toss the memory for
1777 # These are no longer needed, dereference and toss the memory for
1778 # them.
1778 # them.
1779 msng_mnfst_lst = None
1779 msng_mnfst_lst = None
1780 msng_mnfst_set.clear()
1780 msng_mnfst_set.clear()
1781
1781
1782 if extranodes:
1782 if extranodes:
1783 for fname in extranodes:
1783 for fname in extranodes:
1784 if isinstance(fname, int):
1784 if isinstance(fname, int):
1785 continue
1785 continue
1786 add_extra_nodes(fname,
1786 add_extra_nodes(fname,
1787 msng_filenode_set.setdefault(fname, {}))
1787 msng_filenode_set.setdefault(fname, {}))
1788 changedfiles[fname] = 1
1788 changedfiles[fname] = 1
1789 changedfiles = changedfiles.keys()
1789 changedfiles = changedfiles.keys()
1790 changedfiles.sort()
1790 changedfiles.sort()
1791 # Go through all our files in order sorted by name.
1791 # Go through all our files in order sorted by name.
1792 for fname in changedfiles:
1792 for fname in changedfiles:
1793 filerevlog = self.file(fname)
1793 filerevlog = self.file(fname)
1794 if filerevlog.count() == 0:
1794 if filerevlog.count() == 0:
1795 raise util.Abort(_("empty or missing revlog for %s") % fname)
1795 raise util.Abort(_("empty or missing revlog for %s") % fname)
1796 # Toss out the filenodes that the recipient isn't really
1796 # Toss out the filenodes that the recipient isn't really
1797 # missing.
1797 # missing.
1798 if fname in msng_filenode_set:
1798 if fname in msng_filenode_set:
1799 prune_filenodes(fname, filerevlog)
1799 prune_filenodes(fname, filerevlog)
1800 msng_filenode_lst = msng_filenode_set[fname].keys()
1800 msng_filenode_lst = msng_filenode_set[fname].keys()
1801 else:
1801 else:
1802 msng_filenode_lst = []
1802 msng_filenode_lst = []
1803 # If any filenodes are left, generate the group for them,
1803 # If any filenodes are left, generate the group for them,
1804 # otherwise don't bother.
1804 # otherwise don't bother.
1805 if len(msng_filenode_lst) > 0:
1805 if len(msng_filenode_lst) > 0:
1806 yield changegroup.chunkheader(len(fname))
1806 yield changegroup.chunkheader(len(fname))
1807 yield fname
1807 yield fname
1808 # Sort the filenodes by their revision #
1808 # Sort the filenodes by their revision #
1809 msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
1809 msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
1810 # Create a group generator and only pass in a changenode
1810 # Create a group generator and only pass in a changenode
1811 # lookup function as we need to collect no information
1811 # lookup function as we need to collect no information
1812 # from filenodes.
1812 # from filenodes.
1813 group = filerevlog.group(msng_filenode_lst,
1813 group = filerevlog.group(msng_filenode_lst,
1814 lookup_filenode_link_func(fname))
1814 lookup_filenode_link_func(fname))
1815 for chnk in group:
1815 for chnk in group:
1816 yield chnk
1816 yield chnk
1817 if fname in msng_filenode_set:
1817 if fname in msng_filenode_set:
1818 # Don't need this anymore, toss it to free memory.
1818 # Don't need this anymore, toss it to free memory.
1819 del msng_filenode_set[fname]
1819 del msng_filenode_set[fname]
1820 # Signal that no more groups are left.
1820 # Signal that no more groups are left.
1821 yield changegroup.closechunk()
1821 yield changegroup.closechunk()
1822
1822
1823 if msng_cl_lst:
1823 if msng_cl_lst:
1824 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1824 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1825
1825
1826 return util.chunkbuffer(gengroup())
1826 return util.chunkbuffer(gengroup())
1827
1827
1828 def changegroup(self, basenodes, source):
1828 def changegroup(self, basenodes, source):
1829 """Generate a changegroup of all nodes that we have that a recipient
1829 """Generate a changegroup of all nodes that we have that a recipient
1830 doesn't.
1830 doesn't.
1831
1831
1832 This is much easier than the previous function as we can assume that
1832 This is much easier than the previous function as we can assume that
1833 the recipient has any changenode we aren't sending them."""
1833 the recipient has any changenode we aren't sending them."""
1834
1834
1835 self.hook('preoutgoing', throw=True, source=source)
1835 self.hook('preoutgoing', throw=True, source=source)
1836
1836
1837 cl = self.changelog
1837 cl = self.changelog
1838 nodes = cl.nodesbetween(basenodes, None)[0]
1838 nodes = cl.nodesbetween(basenodes, None)[0]
1839 revset = dict.fromkeys([cl.rev(n) for n in nodes])
1839 revset = dict.fromkeys([cl.rev(n) for n in nodes])
1840 self.changegroupinfo(nodes, source)
1840 self.changegroupinfo(nodes, source)
1841
1841
1842 def identity(x):
1842 def identity(x):
1843 return x
1843 return x
1844
1844
1845 def gennodelst(revlog):
1845 def gennodelst(revlog):
1846 for r in xrange(0, revlog.count()):
1846 for r in xrange(0, revlog.count()):
1847 n = revlog.node(r)
1847 n = revlog.node(r)
1848 if revlog.linkrev(n) in revset:
1848 if revlog.linkrev(n) in revset:
1849 yield n
1849 yield n
1850
1850
1851 def changed_file_collector(changedfileset):
1851 def changed_file_collector(changedfileset):
1852 def collect_changed_files(clnode):
1852 def collect_changed_files(clnode):
1853 c = cl.read(clnode)
1853 c = cl.read(clnode)
1854 for fname in c[3]:
1854 for fname in c[3]:
1855 changedfileset[fname] = 1
1855 changedfileset[fname] = 1
1856 return collect_changed_files
1856 return collect_changed_files
1857
1857
1858 def lookuprevlink_func(revlog):
1858 def lookuprevlink_func(revlog):
1859 def lookuprevlink(n):
1859 def lookuprevlink(n):
1860 return cl.node(revlog.linkrev(n))
1860 return cl.node(revlog.linkrev(n))
1861 return lookuprevlink
1861 return lookuprevlink
1862
1862
1863 def gengroup():
1863 def gengroup():
1864 # construct a list of all changed files
1864 # construct a list of all changed files
1865 changedfiles = {}
1865 changedfiles = {}
1866
1866
1867 for chnk in cl.group(nodes, identity,
1867 for chnk in cl.group(nodes, identity,
1868 changed_file_collector(changedfiles)):
1868 changed_file_collector(changedfiles)):
1869 yield chnk
1869 yield chnk
1870 changedfiles = changedfiles.keys()
1870 changedfiles = changedfiles.keys()
1871 changedfiles.sort()
1871 changedfiles.sort()
1872
1872
1873 mnfst = self.manifest
1873 mnfst = self.manifest
1874 nodeiter = gennodelst(mnfst)
1874 nodeiter = gennodelst(mnfst)
1875 for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
1875 for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
1876 yield chnk
1876 yield chnk
1877
1877
1878 for fname in changedfiles:
1878 for fname in changedfiles:
1879 filerevlog = self.file(fname)
1879 filerevlog = self.file(fname)
1880 if filerevlog.count() == 0:
1880 if filerevlog.count() == 0:
1881 raise util.Abort(_("empty or missing revlog for %s") % fname)
1881 raise util.Abort(_("empty or missing revlog for %s") % fname)
1882 nodeiter = gennodelst(filerevlog)
1882 nodeiter = gennodelst(filerevlog)
1883 nodeiter = list(nodeiter)
1883 nodeiter = list(nodeiter)
1884 if nodeiter:
1884 if nodeiter:
1885 yield changegroup.chunkheader(len(fname))
1885 yield changegroup.chunkheader(len(fname))
1886 yield fname
1886 yield fname
1887 lookup = lookuprevlink_func(filerevlog)
1887 lookup = lookuprevlink_func(filerevlog)
1888 for chnk in filerevlog.group(nodeiter, lookup):
1888 for chnk in filerevlog.group(nodeiter, lookup):
1889 yield chnk
1889 yield chnk
1890
1890
1891 yield changegroup.closechunk()
1891 yield changegroup.closechunk()
1892
1892
1893 if nodes:
1893 if nodes:
1894 self.hook('outgoing', node=hex(nodes[0]), source=source)
1894 self.hook('outgoing', node=hex(nodes[0]), source=source)
1895
1895
1896 return util.chunkbuffer(gengroup())
1896 return util.chunkbuffer(gengroup())
1897
1897
1898 def addchangegroup(self, source, srctype, url, emptyok=False):
1898 def addchangegroup(self, source, srctype, url, emptyok=False):
1899 """add changegroup to repo.
1899 """add changegroup to repo.
1900
1900
1901 return values:
1901 return values:
1902 - nothing changed or no source: 0
1902 - nothing changed or no source: 0
1903 - more heads than before: 1+added heads (2..n)
1903 - more heads than before: 1+added heads (2..n)
1904 - less heads than before: -1-removed heads (-2..-n)
1904 - less heads than before: -1-removed heads (-2..-n)
1905 - number of heads stays the same: 1
1905 - number of heads stays the same: 1
1906 """
1906 """
1907 def csmap(x):
1907 def csmap(x):
1908 self.ui.debug(_("add changeset %s\n") % short(x))
1908 self.ui.debug(_("add changeset %s\n") % short(x))
1909 return cl.count()
1909 return cl.count()
1910
1910
1911 def revmap(x):
1911 def revmap(x):
1912 return cl.rev(x)
1912 return cl.rev(x)
1913
1913
1914 if not source:
1914 if not source:
1915 return 0
1915 return 0
1916
1916
1917 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1917 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1918
1918
1919 changesets = files = revisions = 0
1919 changesets = files = revisions = 0
1920
1920
1921 # write changelog data to temp files so concurrent readers will not see
1921 # write changelog data to temp files so concurrent readers will not see
1922 # inconsistent view
1922 # inconsistent view
1923 cl = self.changelog
1923 cl = self.changelog
1924 cl.delayupdate()
1924 cl.delayupdate()
1925 oldheads = len(cl.heads())
1925 oldheads = len(cl.heads())
1926
1926
1927 tr = self.transaction()
1927 tr = self.transaction()
1928 try:
1928 try:
1929 trp = weakref.proxy(tr)
1929 trp = weakref.proxy(tr)
1930 # pull off the changeset group
1930 # pull off the changeset group
1931 self.ui.status(_("adding changesets\n"))
1931 self.ui.status(_("adding changesets\n"))
1932 cor = cl.count() - 1
1932 cor = cl.count() - 1
1933 chunkiter = changegroup.chunkiter(source)
1933 chunkiter = changegroup.chunkiter(source)
1934 if cl.addgroup(chunkiter, csmap, trp, 1) is None and not emptyok:
1934 if cl.addgroup(chunkiter, csmap, trp, 1) is None and not emptyok:
1935 raise util.Abort(_("received changelog group is empty"))
1935 raise util.Abort(_("received changelog group is empty"))
1936 cnr = cl.count() - 1
1936 cnr = cl.count() - 1
1937 changesets = cnr - cor
1937 changesets = cnr - cor
1938
1938
1939 # pull off the manifest group
1939 # pull off the manifest group
1940 self.ui.status(_("adding manifests\n"))
1940 self.ui.status(_("adding manifests\n"))
1941 chunkiter = changegroup.chunkiter(source)
1941 chunkiter = changegroup.chunkiter(source)
1942 # no need to check for empty manifest group here:
1942 # no need to check for empty manifest group here:
1943 # if the result of the merge of 1 and 2 is the same in 3 and 4,
1943 # if the result of the merge of 1 and 2 is the same in 3 and 4,
1944 # no new manifest will be created and the manifest group will
1944 # no new manifest will be created and the manifest group will
1945 # be empty during the pull
1945 # be empty during the pull
1946 self.manifest.addgroup(chunkiter, revmap, trp)
1946 self.manifest.addgroup(chunkiter, revmap, trp)
1947
1947
1948 # process the files
1948 # process the files
1949 self.ui.status(_("adding file changes\n"))
1949 self.ui.status(_("adding file changes\n"))
1950 while 1:
1950 while 1:
1951 f = changegroup.getchunk(source)
1951 f = changegroup.getchunk(source)
1952 if not f:
1952 if not f:
1953 break
1953 break
1954 self.ui.debug(_("adding %s revisions\n") % f)
1954 self.ui.debug(_("adding %s revisions\n") % f)
1955 fl = self.file(f)
1955 fl = self.file(f)
1956 o = fl.count()
1956 o = fl.count()
1957 chunkiter = changegroup.chunkiter(source)
1957 chunkiter = changegroup.chunkiter(source)
1958 if fl.addgroup(chunkiter, revmap, trp) is None:
1958 if fl.addgroup(chunkiter, revmap, trp) is None:
1959 raise util.Abort(_("received file revlog group is empty"))
1959 raise util.Abort(_("received file revlog group is empty"))
1960 revisions += fl.count() - o
1960 revisions += fl.count() - o
1961 files += 1
1961 files += 1
1962
1962
1963 # make changelog see real files again
1963 # make changelog see real files again
1964 cl.finalize(trp)
1964 cl.finalize(trp)
1965
1965
1966 newheads = len(self.changelog.heads())
1966 newheads = len(self.changelog.heads())
1967 heads = ""
1967 heads = ""
1968 if oldheads and newheads != oldheads:
1968 if oldheads and newheads != oldheads:
1969 heads = _(" (%+d heads)") % (newheads - oldheads)
1969 heads = _(" (%+d heads)") % (newheads - oldheads)
1970
1970
1971 self.ui.status(_("added %d changesets"
1971 self.ui.status(_("added %d changesets"
1972 " with %d changes to %d files%s\n")
1972 " with %d changes to %d files%s\n")
1973 % (changesets, revisions, files, heads))
1973 % (changesets, revisions, files, heads))
1974
1974
1975 if changesets > 0:
1975 if changesets > 0:
1976 self.hook('pretxnchangegroup', throw=True,
1976 self.hook('pretxnchangegroup', throw=True,
1977 node=hex(self.changelog.node(cor+1)), source=srctype,
1977 node=hex(self.changelog.node(cor+1)), source=srctype,
1978 url=url)
1978 url=url)
1979
1979
1980 tr.close()
1980 tr.close()
1981 finally:
1981 finally:
1982 del tr
1982 del tr
1983
1983
1984 if changesets > 0:
1984 if changesets > 0:
1985 # forcefully update the on-disk branch cache
1985 # forcefully update the on-disk branch cache
1986 self.ui.debug(_("updating the branch cache\n"))
1986 self.ui.debug(_("updating the branch cache\n"))
1987 self.branchcache = None
1987 self.branchcache = None
1988 self.branchtags()
1988 self.branchtags()
1989 self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
1989 self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
1990 source=srctype, url=url)
1990 source=srctype, url=url)
1991
1991
1992 for i in xrange(cor + 1, cnr + 1):
1992 for i in xrange(cor + 1, cnr + 1):
1993 self.hook("incoming", node=hex(self.changelog.node(i)),
1993 self.hook("incoming", node=hex(self.changelog.node(i)),
1994 source=srctype, url=url)
1994 source=srctype, url=url)
1995
1995
1996 # never return 0 here:
1996 # never return 0 here:
1997 if newheads < oldheads:
1997 if newheads < oldheads:
1998 return newheads - oldheads - 1
1998 return newheads - oldheads - 1
1999 else:
1999 else:
2000 return newheads - oldheads + 1
2000 return newheads - oldheads + 1
2001
2001
2002
2002
2003 def stream_in(self, remote):
2003 def stream_in(self, remote):
2004 fp = remote.stream_out()
2004 fp = remote.stream_out()
2005 l = fp.readline()
2005 l = fp.readline()
2006 try:
2006 try:
2007 resp = int(l)
2007 resp = int(l)
2008 except ValueError:
2008 except ValueError:
2009 raise util.UnexpectedOutput(
2009 raise util.UnexpectedOutput(
2010 _('Unexpected response from remote server:'), l)
2010 _('Unexpected response from remote server:'), l)
2011 if resp == 1:
2011 if resp == 1:
2012 raise util.Abort(_('operation forbidden by server'))
2012 raise util.Abort(_('operation forbidden by server'))
2013 elif resp == 2:
2013 elif resp == 2:
2014 raise util.Abort(_('locking the remote repository failed'))
2014 raise util.Abort(_('locking the remote repository failed'))
2015 elif resp != 0:
2015 elif resp != 0:
2016 raise util.Abort(_('the server sent an unknown error code'))
2016 raise util.Abort(_('the server sent an unknown error code'))
2017 self.ui.status(_('streaming all changes\n'))
2017 self.ui.status(_('streaming all changes\n'))
2018 l = fp.readline()
2018 l = fp.readline()
2019 try:
2019 try:
2020 total_files, total_bytes = map(int, l.split(' ', 1))
2020 total_files, total_bytes = map(int, l.split(' ', 1))
2021 except ValueError, TypeError:
2021 except ValueError, TypeError:
2022 raise util.UnexpectedOutput(
2022 raise util.UnexpectedOutput(
2023 _('Unexpected response from remote server:'), l)
2023 _('Unexpected response from remote server:'), l)
2024 self.ui.status(_('%d files to transfer, %s of data\n') %
2024 self.ui.status(_('%d files to transfer, %s of data\n') %
2025 (total_files, util.bytecount(total_bytes)))
2025 (total_files, util.bytecount(total_bytes)))
2026 start = time.time()
2026 start = time.time()
2027 for i in xrange(total_files):
2027 for i in xrange(total_files):
2028 # XXX doesn't support '\n' or '\r' in filenames
2028 # XXX doesn't support '\n' or '\r' in filenames
2029 l = fp.readline()
2029 l = fp.readline()
2030 try:
2030 try:
2031 name, size = l.split('\0', 1)
2031 name, size = l.split('\0', 1)
2032 size = int(size)
2032 size = int(size)
2033 except ValueError, TypeError:
2033 except ValueError, TypeError:
2034 raise util.UnexpectedOutput(
2034 raise util.UnexpectedOutput(
2035 _('Unexpected response from remote server:'), l)
2035 _('Unexpected response from remote server:'), l)
2036 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
2036 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
2037 ofp = self.sopener(name, 'w')
2037 ofp = self.sopener(name, 'w')
2038 for chunk in util.filechunkiter(fp, limit=size):
2038 for chunk in util.filechunkiter(fp, limit=size):
2039 ofp.write(chunk)
2039 ofp.write(chunk)
2040 ofp.close()
2040 ofp.close()
2041 elapsed = time.time() - start
2041 elapsed = time.time() - start
2042 if elapsed <= 0:
2042 if elapsed <= 0:
2043 elapsed = 0.001
2043 elapsed = 0.001
2044 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
2044 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
2045 (util.bytecount(total_bytes), elapsed,
2045 (util.bytecount(total_bytes), elapsed,
2046 util.bytecount(total_bytes / elapsed)))
2046 util.bytecount(total_bytes / elapsed)))
2047 self.invalidate()
2047 self.invalidate()
2048 return len(self.heads()) + 1
2048 return len(self.heads()) + 1
2049
2049
2050 def clone(self, remote, heads=[], stream=False):
2050 def clone(self, remote, heads=[], stream=False):
2051 '''clone remote repository.
2051 '''clone remote repository.
2052
2052
2053 keyword arguments:
2053 keyword arguments:
2054 heads: list of revs to clone (forces use of pull)
2054 heads: list of revs to clone (forces use of pull)
2055 stream: use streaming clone if possible'''
2055 stream: use streaming clone if possible'''
2056
2056
2057 # now, all clients that can request uncompressed clones can
2057 # now, all clients that can request uncompressed clones can
2058 # read repo formats supported by all servers that can serve
2058 # read repo formats supported by all servers that can serve
2059 # them.
2059 # them.
2060
2060
2061 # if revlog format changes, client will have to check version
2061 # if revlog format changes, client will have to check version
2062 # and format flags on "stream" capability, and use
2062 # and format flags on "stream" capability, and use
2063 # uncompressed only if compatible.
2063 # uncompressed only if compatible.
2064
2064
2065 if stream and not heads and remote.capable('stream'):
2065 if stream and not heads and remote.capable('stream'):
2066 return self.stream_in(remote)
2066 return self.stream_in(remote)
2067 return self.pull(remote, heads)
2067 return self.pull(remote, heads)
2068
2068
2069 # used to avoid circular references so destructors work
2069 # used to avoid circular references so destructors work
2070 def aftertrans(files):
2070 def aftertrans(files):
2071 renamefiles = [tuple(t) for t in files]
2071 renamefiles = [tuple(t) for t in files]
2072 def a():
2072 def a():
2073 for src, dest in renamefiles:
2073 for src, dest in renamefiles:
2074 util.rename(src, dest)
2074 util.rename(src, dest)
2075 return a
2075 return a
2076
2076
2077 def instance(ui, path, create):
2077 def instance(ui, path, create):
2078 return localrepository(ui, util.drop_scheme('file', path), create)
2078 return localrepository(ui, util.drop_scheme('file', path), create)
2079
2079
2080 def islocal(path):
2080 def islocal(path):
2081 return True
2081 return True
@@ -1,117 +1,117 b''
1 marked working directory as branch foo
1 marked working directory as branch foo
2 foo
2 foo
3 marked working directory as branch bar
3 marked working directory as branch bar
4 % branch shadowing
4 % branch shadowing
5 abort: a branch of the same name already exists (use --force to override)
5 abort: a branch of the same name already exists (use --force to override)
6 marked working directory as branch default
6 marked working directory as branch default
7 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
7 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
8 foo
8 foo
9 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
9 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
10 (branch merge, don't forget to commit)
10 (branch merge, don't forget to commit)
11 foo
11 foo
12 changeset: 5:5f8fb06e083e
12 changeset: 5:5f8fb06e083e
13 branch: foo
13 branch: foo
14 tag: tip
14 tag: tip
15 parent: 4:4909a3732169
15 parent: 4:4909a3732169
16 parent: 3:bf1bc2f45e83
16 parent: 3:bf1bc2f45e83
17 user: test
17 user: test
18 date: Mon Jan 12 13:46:40 1970 +0000
18 date: Mon Jan 12 13:46:40 1970 +0000
19 summary: merge
19 summary: merge
20
20
21 changeset: 4:4909a3732169
21 changeset: 4:4909a3732169
22 branch: foo
22 branch: foo
23 parent: 1:b699b1cec9c2
23 parent: 1:b699b1cec9c2
24 user: test
24 user: test
25 date: Mon Jan 12 13:46:40 1970 +0000
25 date: Mon Jan 12 13:46:40 1970 +0000
26 summary: modify a branch
26 summary: modify a branch
27
27
28 changeset: 3:bf1bc2f45e83
28 changeset: 3:bf1bc2f45e83
29 user: test
29 user: test
30 date: Mon Jan 12 13:46:40 1970 +0000
30 date: Mon Jan 12 13:46:40 1970 +0000
31 summary: clear branch name
31 summary: clear branch name
32
32
33 changeset: 2:67ec16bde7f1
33 changeset: 2:67ec16bde7f1
34 branch: bar
34 branch: bar
35 user: test
35 user: test
36 date: Mon Jan 12 13:46:40 1970 +0000
36 date: Mon Jan 12 13:46:40 1970 +0000
37 summary: change branch name
37 summary: change branch name
38
38
39 changeset: 1:b699b1cec9c2
39 changeset: 1:b699b1cec9c2
40 branch: foo
40 branch: foo
41 user: test
41 user: test
42 date: Mon Jan 12 13:46:40 1970 +0000
42 date: Mon Jan 12 13:46:40 1970 +0000
43 summary: add branch name
43 summary: add branch name
44
44
45 changeset: 0:be8523e69bf8
45 changeset: 0:be8523e69bf8
46 user: test
46 user: test
47 date: Mon Jan 12 13:46:40 1970 +0000
47 date: Mon Jan 12 13:46:40 1970 +0000
48 summary: initial
48 summary: initial
49
49
50 foo 5:5f8fb06e083e
50 foo 5:5f8fb06e083e
51 default 3:bf1bc2f45e83 (inactive)
51 default 3:bf1bc2f45e83 (inactive)
52 bar 2:67ec16bde7f1 (inactive)
52 bar 2:67ec16bde7f1 (inactive)
53 foo
53 foo
54 default
54 default
55 bar
55 bar
56 % test for invalid branch cache
56 % test for invalid branch cache
57 rolling back last transaction
57 rolling back last transaction
58 changeset: 4:4909a3732169
58 changeset: 4:4909a3732169
59 branch: foo
59 branch: foo
60 tag: tip
60 tag: tip
61 parent: 1:b699b1cec9c2
61 parent: 1:b699b1cec9c2
62 user: test
62 user: test
63 date: Mon Jan 12 13:46:40 1970 +0000
63 date: Mon Jan 12 13:46:40 1970 +0000
64 summary: modify a branch
64 summary: modify a branch
65
65
66 Invalid branch cache: unknown tip
66 invalidating branch cache (tip differs)
67 changeset: 4:4909a3732169c0c20011c4f4b8fdff4e3d89b23f
67 changeset: 4:4909a3732169c0c20011c4f4b8fdff4e3d89b23f
68 branch: foo
68 branch: foo
69 tag: tip
69 tag: tip
70 parent: 1:b699b1cec9c2966b3700de4fef0dc123cd754c31
70 parent: 1:b699b1cec9c2966b3700de4fef0dc123cd754c31
71 parent: -1:0000000000000000000000000000000000000000
71 parent: -1:0000000000000000000000000000000000000000
72 manifest: 4:d01b250baaa05909152f7ae07d7a649deea0df9a
72 manifest: 4:d01b250baaa05909152f7ae07d7a649deea0df9a
73 user: test
73 user: test
74 date: Mon Jan 12 13:46:40 1970 +0000
74 date: Mon Jan 12 13:46:40 1970 +0000
75 files: a
75 files: a
76 extra: branch=foo
76 extra: branch=foo
77 description:
77 description:
78 modify a branch
78 modify a branch
79
79
80
80
81 4:4909a3732169
81 4:4909a3732169
82 4909a3732169c0c20011c4f4b8fdff4e3d89b23f 4
82 4909a3732169c0c20011c4f4b8fdff4e3d89b23f 4
83 bf1bc2f45e834c75404d0ddab57d53beab56e2f8 default
83 bf1bc2f45e834c75404d0ddab57d53beab56e2f8 default
84 4909a3732169c0c20011c4f4b8fdff4e3d89b23f foo
84 4909a3732169c0c20011c4f4b8fdff4e3d89b23f foo
85 67ec16bde7f1575d523313b9bca000f6a6f12dca bar
85 67ec16bde7f1575d523313b9bca000f6a6f12dca bar
86 % push should update the branch cache
86 % push should update the branch cache
87 % pushing just rev 0
87 % pushing just rev 0
88 be8523e69bf892e25817fc97187516b3c0804ae4 0
88 be8523e69bf892e25817fc97187516b3c0804ae4 0
89 be8523e69bf892e25817fc97187516b3c0804ae4 default
89 be8523e69bf892e25817fc97187516b3c0804ae4 default
90 % pushing everything
90 % pushing everything
91 4909a3732169c0c20011c4f4b8fdff4e3d89b23f 4
91 4909a3732169c0c20011c4f4b8fdff4e3d89b23f 4
92 bf1bc2f45e834c75404d0ddab57d53beab56e2f8 default
92 bf1bc2f45e834c75404d0ddab57d53beab56e2f8 default
93 4909a3732169c0c20011c4f4b8fdff4e3d89b23f foo
93 4909a3732169c0c20011c4f4b8fdff4e3d89b23f foo
94 67ec16bde7f1575d523313b9bca000f6a6f12dca bar
94 67ec16bde7f1575d523313b9bca000f6a6f12dca bar
95 % update with no arguments: tipmost revision of the current branch
95 % update with no arguments: tipmost revision of the current branch
96 bf1bc2f45e83
96 bf1bc2f45e83
97 4909a3732169 (foo) tip
97 4909a3732169 (foo) tip
98 marked working directory as branch foobar
98 marked working directory as branch foobar
99 abort: branch foobar not found
99 abort: branch foobar not found
100 % fastforward merge
100 % fastforward merge
101 marked working directory as branch ff
101 marked working directory as branch ff
102 adding ff
102 adding ff
103 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
103 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
104 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
104 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
105 (branch merge, don't forget to commit)
105 (branch merge, don't forget to commit)
106 foo
106 foo
107 changeset: 6:f0c74f92a385
107 changeset: 6:f0c74f92a385
108 branch: foo
108 branch: foo
109 tag: tip
109 tag: tip
110 parent: 4:4909a3732169
110 parent: 4:4909a3732169
111 parent: 5:c420d2121b71
111 parent: 5:c420d2121b71
112 user: test
112 user: test
113 date: Mon Jan 12 13:46:40 1970 +0000
113 date: Mon Jan 12 13:46:40 1970 +0000
114 summary: Merge ff into foo
114 summary: Merge ff into foo
115
115
116 a
116 a
117 ff
117 ff
General Comments 0
You need to be logged in to leave comments. Login now