localrepo: fix bugs in branchheads and add docstring...
Sune Foldager
r9475:c295a82a default
@@ -1,2191 +1,2198 @@
# localrepo.py - read/write repository class for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2, incorporated herein by reference.

from node import bin, hex, nullid, nullrev, short
from i18n import _
import repo, changegroup, subrepo
import changelog, dirstate, filelog, manifest, context
import lock, transaction, store, encoding
import util, extensions, hook, error
import match as match_
import merge as merge_
from lock import release
import weakref, stat, errno, os, time, inspect
propertycache = util.propertycache

class localrepository(repo.repository):
    capabilities = set(('lookup', 'changegroupsubset', 'branchmap'))
    supported = set('revlogv1 store fncache shared'.split())

    def __init__(self, baseui, path=None, create=0):
        repo.repository.__init__(self)
        self.root = os.path.realpath(path)
        self.path = os.path.join(self.root, ".hg")
        self.origroot = path
        self.opener = util.opener(self.path)
        self.wopener = util.opener(self.root)
        self.baseui = baseui
        self.ui = baseui.copy()

        try:
            self.ui.readconfig(self.join("hgrc"), self.root)
            extensions.loadall(self.ui)
        except IOError:
            pass

        if not os.path.isdir(self.path):
            if create:
                if not os.path.exists(path):
                    os.mkdir(path)
                os.mkdir(self.path)
                requirements = ["revlogv1"]
                if self.ui.configbool('format', 'usestore', True):
                    os.mkdir(os.path.join(self.path, "store"))
                    requirements.append("store")
                    if self.ui.configbool('format', 'usefncache', True):
                        requirements.append("fncache")
                    # create an invalid changelog
                    self.opener("00changelog.i", "a").write(
                        '\0\0\0\2' # represents revlogv2
                        ' dummy changelog to prevent using the old repo layout'
                    )
                reqfile = self.opener("requires", "w")
                for r in requirements:
                    reqfile.write("%s\n" % r)
                reqfile.close()
            else:
                raise error.RepoError(_("repository %s not found") % path)
        elif create:
            raise error.RepoError(_("repository %s already exists") % path)
        else:
            # find requirements
            requirements = set()
            try:
                requirements = set(self.opener("requires").read().splitlines())
            except IOError, inst:
                if inst.errno != errno.ENOENT:
                    raise
            for r in requirements - self.supported:
                raise error.RepoError(_("requirement '%s' not supported") % r)

        self.sharedpath = self.path
        try:
            s = os.path.realpath(self.opener("sharedpath").read())
            if not os.path.exists(s):
                raise error.RepoError(
                    _('.hg/sharedpath points to nonexistent directory %s') % s)
            self.sharedpath = s
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise

        self.store = store.store(requirements, self.sharedpath, util.opener)
        self.spath = self.store.path
        self.sopener = self.store.opener
        self.sjoin = self.store.join
        self.opener.createmode = self.store.createmode

        self.tagscache = None
        self._tagstypecache = None
        self.branchcache = None
        self._ubranchcache = None # UTF-8 version of branchcache
        self._branchcachetip = None
        self.nodetagscache = None
        self.filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

    @propertycache
    def changelog(self):
        c = changelog.changelog(self.sopener)
        if 'HG_PENDING' in os.environ:
            p = os.environ['HG_PENDING']
            if p.startswith(self.root):
                c.readpending('00changelog.i.a')
        self.sopener.defversion = c.version
        return c

    @propertycache
    def manifest(self):
        return manifest.manifest(self.sopener)

    @propertycache
    def dirstate(self):
        return dirstate.dirstate(self.opener, self.ui, self.root)

    def __getitem__(self, changeid):
        if changeid is None:
            return context.workingctx(self)
        return context.changectx(self, changeid)

    def __nonzero__(self):
        return True

    def __len__(self):
        return len(self.changelog)

    def __iter__(self):
        for i in xrange(len(self)):
            yield i

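    # A minimal usage sketch of the mapping protocol above, assuming `repo`
    # is a localrepository instance:
    #
    #     ctx = repo['tip']    # changectx for a revision number, node, or tag
    #     wctx = repo[None]    # workingctx for the working directory
    #     for rev in repo:     # __iter__ yields revision numbers 0..len(repo)-1
    #         pass
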
    def url(self):
        return 'file:' + self.root

    def hook(self, name, throw=False, **args):
        return hook.hook(self.ui, self, name, throw, **args)

    tag_disallowed = ':\r\n'

    def _tag(self, names, node, message, local, user, date, extra={}):
        if isinstance(names, str):
            allchars = names
            names = (names,)
        else:
            allchars = ''.join(names)
        for c in self.tag_disallowed:
            if c in allchars:
                raise util.Abort(_('%r cannot be used in a tag name') % c)

        for name in names:
            self.hook('pretag', throw=True, node=hex(node), tag=name,
                      local=local)

        def writetags(fp, names, munge, prevtags):
            fp.seek(0, 2)
            if prevtags and prevtags[-1] != '\n':
                fp.write('\n')
            for name in names:
                m = munge and munge(name) or name
                if self._tagstypecache and name in self._tagstypecache:
                    old = self.tagscache.get(name, nullid)
                    fp.write('%s %s\n' % (hex(old), m))
                fp.write('%s %s\n' % (hex(node), m))
            fp.close()

        prevtags = ''
        if local:
            try:
                fp = self.opener('localtags', 'r+')
            except IOError:
                fp = self.opener('localtags', 'a')
            else:
                prevtags = fp.read()

            # local tags are stored in the current charset
            writetags(fp, names, None, prevtags)
            for name in names:
                self.hook('tag', node=hex(node), tag=name, local=local)
            return

        try:
            fp = self.wfile('.hgtags', 'rb+')
        except IOError:
            fp = self.wfile('.hgtags', 'ab')
        else:
            prevtags = fp.read()

        # committed tags are stored in UTF-8
        writetags(fp, names, encoding.fromlocal, prevtags)

        if '.hgtags' not in self.dirstate:
            self.add(['.hgtags'])

        m = match_.exact(self.root, '', ['.hgtags'])
        tagnode = self.commit(message, user, date, extra=extra, match=m)

        for name in names:
            self.hook('tag', node=hex(node), tag=name, local=local)

        return tagnode

    def tag(self, names, node, message, local, user, date):
        '''tag a revision with one or more symbolic names.

        names is a list of strings or, when adding a single tag, names may be a
        string.

        if local is True, the tags are stored in a per-repository file.
        otherwise, they are stored in the .hgtags file, and a new
        changeset is committed with the change.

        keyword arguments:

        local: whether to store tags in non-version-controlled file
        (default False)

        message: commit message to use if committing

        user: name of user to use if committing

        date: date tuple to use if committing'''

        for x in self.status()[:5]:
            if '.hgtags' in x:
                raise util.Abort(_('working copy of .hgtags is changed '
                                   '(please commit .hgtags manually)'))

        self.tags() # instantiate the cache
        self._tag(names, node, message, local, user, date)

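    # A minimal usage sketch for tag() above, assuming `repo` was opened via
    # hg.repository(ui, path) and the working copy of .hgtags is clean:
    #
    #     repo.tag('v1.0', repo.lookup('tip'), 'Added tag v1.0', False,
    #              'Example User <user@example.com>', None)
    #
    # local=False commits a changeset touching .hgtags; local=True only
    # writes to .hg/localtags.
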
    def tags(self):
        '''return a mapping of tag to node'''
        if self.tagscache:
            return self.tagscache

        globaltags = {}
        tagtypes = {}

        def readtags(lines, fn, tagtype):
            filetags = {}
            count = 0

            def warn(msg):
                self.ui.warn(_("%s, line %s: %s\n") % (fn, count, msg))

            for l in lines:
                count += 1
                if not l:
                    continue
                s = l.split(" ", 1)
                if len(s) != 2:
                    warn(_("cannot parse entry"))
                    continue
                node, key = s
                key = encoding.tolocal(key.strip()) # stored in UTF-8
                try:
                    bin_n = bin(node)
                except TypeError:
                    warn(_("node '%s' is not well formed") % node)
                    continue
                if bin_n not in self.changelog.nodemap:
                    # silently ignore as pull -r might cause this
                    continue

                h = []
                if key in filetags:
                    n, h = filetags[key]
                    h.append(n)
                filetags[key] = (bin_n, h)

            for k, nh in filetags.iteritems():
                if k not in globaltags:
                    globaltags[k] = nh
                    tagtypes[k] = tagtype
                    continue

                # we prefer the global tag if:
                #  it supercedes us OR
                #  mutual supercedes and it has a higher rank
                # otherwise we win because we're tip-most
                an, ah = nh
                bn, bh = globaltags[k]
                if (bn != an and an in bh and
                    (bn not in ah or len(bh) > len(ah))):
                    an = bn
                ah.extend([n for n in bh if n not in ah])
                globaltags[k] = an, ah
                tagtypes[k] = tagtype

        seen = set()
        f = None
        ctxs = []
        for node in self.heads():
            try:
                fnode = self[node].filenode('.hgtags')
            except error.LookupError:
                continue
            if fnode not in seen:
                seen.add(fnode)
                if not f:
                    f = self.filectx('.hgtags', fileid=fnode)
                else:
                    f = f.filectx(fnode)
                ctxs.append(f)

        # read the tags file from each head, ending with the tip
        for f in reversed(ctxs):
            readtags(f.data().splitlines(), f, "global")

        try:
            data = encoding.fromlocal(self.opener("localtags").read())
            # localtags are stored in the local character set
            # while the internal tag table is stored in UTF-8
            readtags(data.splitlines(), "localtags", "local")
        except IOError:
            pass

        self.tagscache = {}
        self._tagstypecache = {}
        for k, nh in globaltags.iteritems():
            n = nh[0]
            if n != nullid:
                self.tagscache[k] = n
            self._tagstypecache[k] = tagtypes[k]
        self.tagscache['tip'] = self.changelog.tip()
        return self.tagscache

    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        self.tags()

        return self._tagstypecache.get(tagname)

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        l = []
        for t, n in self.tags().iteritems():
            try:
                r = self.changelog.rev(n)
            except:
                r = -2 # sort to the beginning of the list if unknown
            l.append((r, t, n))
        return [(t, n) for r, t, n in sorted(l)]

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self.nodetagscache:
            self.nodetagscache = {}
            for t, n in self.tags().iteritems():
                self.nodetagscache.setdefault(n, []).append(t)
        return self.nodetagscache.get(node, [])

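    # Tag lookup sketch (illustrative; node values are whatever the repository
    # actually contains):
    #
    #     repo.tags()               # {'tip': node, 'v1.0': node, ...}
    #     repo.tagtype('v1.0')      # 'global', 'local', or None
    #     repo.tagslist()           # [(name, node), ...] ordered by revision
    #     repo.nodetags(some_node)  # list of tag names attached to that node
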
    def _branchtags(self, partial, lrev):
        # TODO: rename this function?
        tiprev = len(self) - 1
        if lrev != tiprev:
            self._updatebranchcache(partial, lrev+1, tiprev+1)
            self._writebranchcache(partial, self.changelog.tip(), tiprev)

        return partial

    def branchmap(self):
        tip = self.changelog.tip()
        if self.branchcache is not None and self._branchcachetip == tip:
            return self.branchcache

        oldtip = self._branchcachetip
        self._branchcachetip = tip
        if self.branchcache is None:
            self.branchcache = {} # avoid recursion in changectx
        else:
            self.branchcache.clear() # keep using the same dict
        if oldtip is None or oldtip not in self.changelog.nodemap:
            partial, last, lrev = self._readbranchcache()
        else:
            lrev = self.changelog.rev(oldtip)
            partial = self._ubranchcache

        self._branchtags(partial, lrev)
        # this private cache holds all heads (not just tips)
        self._ubranchcache = partial

        # the branch cache is stored on disk as UTF-8, but in the local
        # charset internally
        for k, v in partial.iteritems():
            self.branchcache[encoding.tolocal(k)] = v
        return self.branchcache


    def branchtags(self):
        '''return a dict where branch names map to the tipmost head of
        the branch, open heads come before closed'''
        bt = {}
        for bn, heads in self.branchmap().iteritems():
            head = None
            for i in range(len(heads)-1, -1, -1):
                h = heads[i]
                if 'close' not in self.changelog.read(h)[5]:
                    head = h
                    break
            # no open heads were found
            if head is None:
                head = heads[-1]
            bt[bn] = head
        return bt
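
    # Sketch of how branchmap() and branchtags() relate (illustrative):
    #
    #     for name, heads in repo.branchmap().iteritems():
    #         pass                      # heads: all heads of the named branch
    #     repo.branchtags()['default']  # single tipmost (preferably open) head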


    def _readbranchcache(self):
        partial = {}
        try:
            f = self.opener("branchheads.cache")
            lines = f.read().split('\n')
            f.close()
        except (IOError, OSError):
            return {}, nullid, nullrev

        try:
            last, lrev = lines.pop(0).split(" ", 1)
            last, lrev = bin(last), int(lrev)
            if lrev >= len(self) or self[lrev].node() != last:
                # invalidate the cache
                raise ValueError('invalidating branch cache (tip differs)')
            for l in lines:
                if not l: continue
                node, label = l.split(" ", 1)
                partial.setdefault(label.strip(), []).append(bin(node))
        except KeyboardInterrupt:
            raise
        except Exception, inst:
            if self.ui.debugflag:
                self.ui.warn(str(inst), '\n')
            partial, last, lrev = {}, nullid, nullrev
        return partial, last, lrev

    def _writebranchcache(self, branches, tip, tiprev):
        try:
            f = self.opener("branchheads.cache", "w", atomictemp=True)
            f.write("%s %s\n" % (hex(tip), tiprev))
            for label, nodes in branches.iteritems():
                for node in nodes:
                    f.write("%s %s\n" % (hex(node), label))
            f.rename()
        except (IOError, OSError):
            pass

    def _updatebranchcache(self, partial, start, end):
        # collect new branch entries
        newbranches = {}
        for r in xrange(start, end):
            c = self[r]
            newbranches.setdefault(c.branch(), []).append(c.node())
        # if older branchheads are reachable from new ones, they aren't
        # really branchheads. Note checking parents is insufficient:
        # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
        for branch, newnodes in newbranches.iteritems():
            bheads = partial.setdefault(branch, [])
            bheads.extend(newnodes)
            if len(bheads) < 2:
                continue
            newbheads = []
            # starting from tip means fewer passes over reachable
            while newnodes:
                latest = newnodes.pop()
                if latest not in bheads:
                    continue
                minbhrev = self[min([self[bh].rev() for bh in bheads])].node()
                reachable = self.changelog.reachable(latest, minbhrev)
                bheads = [b for b in bheads if b not in reachable]
                newbheads.insert(0, latest)
            bheads.extend(newbheads)
            partial[branch] = bheads

    def lookup(self, key):
        if isinstance(key, int):
            return self.changelog.node(key)
        elif key == '.':
            return self.dirstate.parents()[0]
        elif key == 'null':
            return nullid
        elif key == 'tip':
            return self.changelog.tip()
        n = self.changelog._match(key)
        if n:
            return n
        if key in self.tags():
            return self.tags()[key]
        if key in self.branchtags():
            return self.branchtags()[key]
        n = self.changelog._partialmatch(key)
        if n:
            return n

        # can't find key, check if it might have come from damaged dirstate
        if key in self.dirstate.parents():
            raise error.Abort(_("working directory has unknown parent '%s'!")
                              % short(key))
        try:
            if len(key) == 20:
                key = hex(key)
        except:
            pass
        raise error.RepoLookupError(_("unknown revision '%s'") % key)

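    # lookup() resolution order, sketched with hypothetical keys:
    #
    #     repo.lookup(0)           # integer revision -> changelog.node(0)
    #     repo.lookup('.')         # first parent of the working directory
    #     repo.lookup('tip')       # current tip
    #     repo.lookup('default')   # branch name, via branchtags()
    #     repo.lookup('c295a82a')  # full or partial hex node
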
    def local(self):
        return True

    def join(self, f):
        return os.path.join(self.path, f)

    def wjoin(self, f):
        return os.path.join(self.root, f)

    def rjoin(self, f):
        return os.path.join(self.root, util.pconvert(f))

    def file(self, f):
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.sopener, f)

    def changectx(self, changeid):
        return self[changeid]

    def parents(self, changeid=None):
        '''get list of changectxs for parents of changeid'''
        return self[changeid].parents()

    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def wfile(self, f, mode='r'):
        return self.wopener(f, mode)

    def _link(self, f):
        return os.path.islink(self.wjoin(f))

    def _filter(self, filter, filename, data):
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    continue
                mf = match_.match(self.root, '', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: util.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not inspect.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self.filterpats[filter] = l

        for mf, fn, cmd in self.filterpats[filter]:
            if mf(filename):
                self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

    def wread(self, filename):
        if self._link(filename):
            data = os.readlink(self.wjoin(filename))
        else:
            data = self.wopener(filename, 'r').read()
        return self._filter("encode", filename, data)

    def wwrite(self, filename, data, flags):
        data = self._filter("decode", filename, data)
        try:
            os.unlink(self.wjoin(filename))
        except OSError:
            pass
        if 'l' in flags:
            self.wopener.symlink(data, filename)
        else:
            self.wopener(filename, 'w').write(data)
            if 'x' in flags:
                util.set_flags(self.wjoin(filename), False, True)

    def wwritedata(self, filename, data):
        return self._filter("decode", filename, data)

    def transaction(self):
        tr = self._transref and self._transref() or None
        if tr and tr.running():
            return tr.nest()

        # abort here if the journal already exists
        if os.path.exists(self.sjoin("journal")):
            raise error.RepoError(_("journal already exists - run hg recover"))

        # save dirstate for rollback
        try:
            ds = self.opener("dirstate").read()
        except IOError:
            ds = ""
        self.opener("journal.dirstate", "w").write(ds)
        self.opener("journal.branch", "w").write(self.dirstate.branch())

        renames = [(self.sjoin("journal"), self.sjoin("undo")),
                   (self.join("journal.dirstate"), self.join("undo.dirstate")),
                   (self.join("journal.branch"), self.join("undo.branch"))]
        tr = transaction.transaction(self.ui.warn, self.sopener,
                                     self.sjoin("journal"),
                                     aftertrans(renames),
                                     self.store.createmode)
        self._transref = weakref.ref(tr)
        return tr

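    # Transaction usage sketch, mirroring what commitctx() does further down
    # (illustrative):
    #
    #     lock = repo.lock()
    #     try:
    #         tr = repo.transaction()
    #         # ... write revlog/store data through tr ...
    #         tr.close()        # an unclosed journal is handled by recover()
    #     finally:
    #         lock.release()
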
    def recover(self):
        lock = self.lock()
        try:
            if os.path.exists(self.sjoin("journal")):
                self.ui.status(_("rolling back interrupted transaction\n"))
                transaction.rollback(self.sopener, self.sjoin("journal"), self.ui.warn)
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False
        finally:
            lock.release()

    def rollback(self):
        wlock = lock = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if os.path.exists(self.sjoin("undo")):
                self.ui.status(_("rolling back last transaction\n"))
                transaction.rollback(self.sopener, self.sjoin("undo"), self.ui.warn)
                util.rename(self.join("undo.dirstate"), self.join("dirstate"))
                try:
                    branch = self.opener("undo.branch").read()
                    self.dirstate.setbranch(branch)
                except IOError:
                    self.ui.warn(_("Named branch could not be reset, "
                                   "current branch still is: %s\n")
                                 % encoding.tolocal(self.dirstate.branch()))
                self.invalidate()
                self.dirstate.invalidate()
            else:
                self.ui.warn(_("no rollback information available\n"))
        finally:
            release(lock, wlock)

    def invalidate(self):
        for a in "changelog manifest".split():
            if a in self.__dict__:
                delattr(self, a)
        self.tagscache = None
        self._tagstypecache = None
        self.nodetagscache = None
        self.branchcache = None
        self._ubranchcache = None
        self._branchcachetip = None

    def _lock(self, lockname, wait, releasefn, acquirefn, desc):
        try:
            l = lock.lock(lockname, 0, releasefn, desc=desc)
        except error.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
                          releasefn, desc=desc)
        if acquirefn:
            acquirefn()
        return l

    def lock(self, wait=True):
        l = self._lockref and self._lockref()
        if l is not None and l.held:
            l.lock()
            return l

        l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
                       _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l

    def wlock(self, wait=True):
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        l = self._lock(self.join("wlock"), wait, self.dirstate.write,
                       self.dirstate.invalidate, _('working directory of %s') %
                       self.origroot)
        self._wlockref = weakref.ref(l)
        return l

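    # Locking sketch (illustrative): wlock() guards the working directory and
    # dirstate, lock() guards the store; nest wlock outside lock and release
    # in reverse order, as rollback() above does.
    #
    #     wlock = repo.wlock()
    #     try:
    #         lock = repo.lock()
    #         try:
    #             pass          # modify store and dirstate here
    #         finally:
    #             lock.release()
    #     finally:
    #         wlock.release()
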
    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        text = fctx.data()
        flog = self.file(fname)
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = fparent2o = manifest2.get(fname, nullid)

        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file. This copy data will effectively act as a parent
            # of this new revision. If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent. For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4        as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # find source in nearest ancestor if we've lost track
            if not crev:
                self.ui.debug(_(" %s: searching for copy revision for %s\n") %
                              (fname, cfname))
                for ancestor in self['.'].ancestors():
                    if cfname in ancestor:
                        crev = ancestor[cfname].filenode()
                        break

            self.ui.debug(_(" %s: copy %s:%s\n") % (fname, cfname, hex(crev)))
            meta["copy"] = cfname
            meta["copyrev"] = hex(crev)
            fparent1, fparent2 = nullid, newfparent
        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestor = flog.ancestor(fparent1, fparent2)
            if fparentancestor == fparent1:
                fparent1, fparent2 = fparent2, nullid
            elif fparentancestor == fparent2:
                fparent2 = nullid

        # is the file changed?
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)

        # are just the flags changed during merge?
        if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1

    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra={}):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """

        def fail(f, msg):
            raise util.Abort('%s: %s' % (f, msg))

        if not match:
            match = match_.always(self.root, '')

        if not force:
            vdirs = []
            match.dir = vdirs.append
            match.bad = fail

        wlock = self.wlock()
        try:
            p1, p2 = self.dirstate.parents()
            wctx = self[None]

            if (not force and p2 != nullid and match and
                (match.files() or match.anypats())):
                raise util.Abort(_('cannot partially commit a merge '
                                   '(do not specify files or patterns)'))

            changes = self.status(match=match, clean=force)
            if force:
                changes[0].extend(changes[6]) # mq may commit unchanged files

            # check subrepos
            subs = []
            for s in wctx.substate:
                if match(s) and wctx.sub(s).dirty():
                    subs.append(s)
            if subs and '.hgsubstate' not in changes[0]:
                changes[0].insert(0, '.hgsubstate')

            # make sure all explicit patterns are matched
            if not force and match.files():
                matched = set(changes[0] + changes[1] + changes[2])

                for f in match.files():
                    if f == '.' or f in matched or f in wctx.substate:
                        continue
                    if f in changes[3]: # missing
                        fail(f, _('file not found!'))
                    if f in vdirs: # visited directory
                        d = f + '/'
                        for mf in matched:
                            if mf.startswith(d):
                                break
                        else:
                            fail(f, _("no match under directory!"))
                    elif f not in self.dirstate:
                        fail(f, _("file not tracked!"))

            if (not force and not extra.get("close") and p2 == nullid
                and not (changes[0] or changes[1] or changes[2])
                and self[None].branch() == self['.'].branch()):
                return None

            ms = merge_.mergestate(self)
            for f in changes[0]:
                if f in ms and ms[f] == 'u':
                    raise util.Abort(_("unresolved merge conflicts "
                                       "(see hg resolve)"))

            cctx = context.workingctx(self, (p1, p2), text, user, date,
                                      extra, changes)
            if editor:
                cctx._text = editor(self, cctx, subs)

            # commit subs
            if subs:
                state = wctx.substate.copy()
                for s in subs:
                    self.ui.status(_('committing subrepository %s\n') % s)
                    sr = wctx.sub(s).commit(cctx._text, user, date)
                    state[s] = (state[s][0], sr)
                subrepo.writestate(self, state)

            ret = self.commitctx(cctx, True)

            # update dirstate and mergestate
            for f in changes[0] + changes[1]:
                self.dirstate.normal(f)
            for f in changes[2]:
                self.dirstate.forget(f)
            self.dirstate.setparents(ret)
            ms.reset()

            return ret

        finally:
            wlock.release()

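    # A minimal commit sketch, assuming the file was previously added with
    # repo.add(['foo.txt']) (illustrative):
    #
    #     m = match_.exact(repo.root, '', ['foo.txt'])
    #     node = repo.commit('add foo', user='Example <u@example.com>',
    #                        date=None, match=m)
    #     # returns None when there is nothing to commit, else the new node
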
901 def commitctx(self, ctx, error=False):
901 def commitctx(self, ctx, error=False):
902 """Add a new revision to current repository.
902 """Add a new revision to current repository.
903
903
904 Revision information is passed via the context argument.
904 Revision information is passed via the context argument.
905 """
905 """
906
906
907 tr = lock = None
907 tr = lock = None
908 removed = ctx.removed()
908 removed = ctx.removed()
909 p1, p2 = ctx.p1(), ctx.p2()
909 p1, p2 = ctx.p1(), ctx.p2()
910 m1 = p1.manifest().copy()
910 m1 = p1.manifest().copy()
911 m2 = p2.manifest()
911 m2 = p2.manifest()
912 user = ctx.user()
912 user = ctx.user()
913
913
914 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
914 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
915 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
915 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
916
916
917 lock = self.lock()
917 lock = self.lock()
918 try:
918 try:
919 tr = self.transaction()
919 tr = self.transaction()
920 trp = weakref.proxy(tr)
920 trp = weakref.proxy(tr)
921
921
922 # check in files
922 # check in files
923 new = {}
923 new = {}
924 changed = []
924 changed = []
925 linkrev = len(self)
925 linkrev = len(self)
926 for f in sorted(ctx.modified() + ctx.added()):
926 for f in sorted(ctx.modified() + ctx.added()):
927 self.ui.note(f + "\n")
927 self.ui.note(f + "\n")
928 try:
928 try:
929 fctx = ctx[f]
929 fctx = ctx[f]
930 new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
930 new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
931 changed)
931 changed)
932 m1.set(f, fctx.flags())
932 m1.set(f, fctx.flags())
933 except (OSError, IOError):
933 except (OSError, IOError):
934 if error:
934 if error:
935 self.ui.warn(_("trouble committing %s!\n") % f)
935 self.ui.warn(_("trouble committing %s!\n") % f)
936 raise
936 raise
937 else:
937 else:
938 removed.append(f)
938 removed.append(f)
939
939
940 # update manifest
940 # update manifest
941 m1.update(new)
941 m1.update(new)
942 removed = [f for f in sorted(removed) if f in m1 or f in m2]
942 removed = [f for f in sorted(removed) if f in m1 or f in m2]
943 drop = [f for f in removed if f in m1]
943 drop = [f for f in removed if f in m1]
944 for f in drop:
944 for f in drop:
945 del m1[f]
945 del m1[f]
946 mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
946 mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
947 p2.manifestnode(), (new, drop))
947 p2.manifestnode(), (new, drop))
948
948
949 # update changelog
949 # update changelog
950 self.changelog.delayupdate()
950 self.changelog.delayupdate()
951 n = self.changelog.add(mn, changed + removed, ctx.description(),
951 n = self.changelog.add(mn, changed + removed, ctx.description(),
952 trp, p1.node(), p2.node(),
952 trp, p1.node(), p2.node(),
953 user, ctx.date(), ctx.extra().copy())
953 user, ctx.date(), ctx.extra().copy())
954 p = lambda: self.changelog.writepending() and self.root or ""
954 p = lambda: self.changelog.writepending() and self.root or ""
955 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
955 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
956 parent2=xp2, pending=p)
956 parent2=xp2, pending=p)
957 self.changelog.finalize(trp)
957 self.changelog.finalize(trp)
958 tr.close()
958 tr.close()
959
959
960 if self.branchcache:
960 if self.branchcache:
961 self.branchtags()
961 self.branchtags()
962
962
963 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
963 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
964 return n
964 return n
965 finally:
965 finally:
966 del tr
966 del tr
967 lock.release()
967 lock.release()
968
968
969 def walk(self, match, node=None):
969 def walk(self, match, node=None):
970 '''
970 '''
971 walk recursively through the directory tree or a given
971 walk recursively through the directory tree or a given
972 changeset, finding all files matched by the match
972 changeset, finding all files matched by the match
973 function
973 function
974 '''
974 '''
975 return self[node].walk(match)
975 return self[node].walk(match)
976
976
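# A minimal usage sketch (hypothetical, assuming an already-open
# localrepository 'repo'): walking the working directory with the same
# "always" matcher factory used by status() below.  The path is a placeholder.
from mercurial import hg, ui as uimod, match as match_

repo = hg.repository(uimod.ui(), '/path/to/repo')
m = match_.always(repo.root, repo.getcwd())
for f in repo.walk(m, node=None):        # node=None selects the working dir
    print f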
977 def status(self, node1='.', node2=None, match=None,
977 def status(self, node1='.', node2=None, match=None,
978 ignored=False, clean=False, unknown=False):
978 ignored=False, clean=False, unknown=False):
979 """return status of files between two nodes or node and working directory
979 """return status of files between two nodes or node and working directory
980
980
981 If node1 is None, use the first dirstate parent instead.
981 If node1 is None, use the first dirstate parent instead.
982 If node2 is None, compare node1 with working directory.
982 If node2 is None, compare node1 with working directory.
983 """
983 """
984
984
985 def mfmatches(ctx):
985 def mfmatches(ctx):
986 mf = ctx.manifest().copy()
986 mf = ctx.manifest().copy()
987 for fn in mf.keys():
987 for fn in mf.keys():
988 if not match(fn):
988 if not match(fn):
989 del mf[fn]
989 del mf[fn]
990 return mf
990 return mf
991
991
992 if isinstance(node1, context.changectx):
992 if isinstance(node1, context.changectx):
993 ctx1 = node1
993 ctx1 = node1
994 else:
994 else:
995 ctx1 = self[node1]
995 ctx1 = self[node1]
996 if isinstance(node2, context.changectx):
996 if isinstance(node2, context.changectx):
997 ctx2 = node2
997 ctx2 = node2
998 else:
998 else:
999 ctx2 = self[node2]
999 ctx2 = self[node2]
1000
1000
1001 working = ctx2.rev() is None
1001 working = ctx2.rev() is None
1002 parentworking = working and ctx1 == self['.']
1002 parentworking = working and ctx1 == self['.']
1003 match = match or match_.always(self.root, self.getcwd())
1003 match = match or match_.always(self.root, self.getcwd())
1004 listignored, listclean, listunknown = ignored, clean, unknown
1004 listignored, listclean, listunknown = ignored, clean, unknown
1005
1005
1006 # load earliest manifest first for caching reasons
1006 # load earliest manifest first for caching reasons
1007 if not working and ctx2.rev() < ctx1.rev():
1007 if not working and ctx2.rev() < ctx1.rev():
1008 ctx2.manifest()
1008 ctx2.manifest()
1009
1009
1010 if not parentworking:
1010 if not parentworking:
1011 def bad(f, msg):
1011 def bad(f, msg):
1012 if f not in ctx1:
1012 if f not in ctx1:
1013 self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
1013 self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
1014 match.bad = bad
1014 match.bad = bad
1015
1015
1016 if working: # we need to scan the working dir
1016 if working: # we need to scan the working dir
1017 s = self.dirstate.status(match, listignored, listclean, listunknown)
1017 s = self.dirstate.status(match, listignored, listclean, listunknown)
1018 cmp, modified, added, removed, deleted, unknown, ignored, clean = s
1018 cmp, modified, added, removed, deleted, unknown, ignored, clean = s
1019
1019
1020 # check for any possibly clean files
1020 # check for any possibly clean files
1021 if parentworking and cmp:
1021 if parentworking and cmp:
1022 fixup = []
1022 fixup = []
1023 # do a full compare of any files that might have changed
1023 # do a full compare of any files that might have changed
1024 for f in sorted(cmp):
1024 for f in sorted(cmp):
1025 if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
1025 if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
1026 or ctx1[f].cmp(ctx2[f].data())):
1026 or ctx1[f].cmp(ctx2[f].data())):
1027 modified.append(f)
1027 modified.append(f)
1028 else:
1028 else:
1029 fixup.append(f)
1029 fixup.append(f)
1030
1030
1031 if listclean:
1031 if listclean:
1032 clean += fixup
1032 clean += fixup
1033
1033
1034 # update dirstate for files that are actually clean
1034 # update dirstate for files that are actually clean
1035 if fixup:
1035 if fixup:
1036 try:
1036 try:
1037 # updating the dirstate is optional
1037 # updating the dirstate is optional
1038 # so we don't wait on the lock
1038 # so we don't wait on the lock
1039 wlock = self.wlock(False)
1039 wlock = self.wlock(False)
1040 try:
1040 try:
1041 for f in fixup:
1041 for f in fixup:
1042 self.dirstate.normal(f)
1042 self.dirstate.normal(f)
1043 finally:
1043 finally:
1044 wlock.release()
1044 wlock.release()
1045 except error.LockError:
1045 except error.LockError:
1046 pass
1046 pass
1047
1047
1048 if not parentworking:
1048 if not parentworking:
1049 mf1 = mfmatches(ctx1)
1049 mf1 = mfmatches(ctx1)
1050 if working:
1050 if working:
1051 # we are comparing working dir against non-parent
1051 # we are comparing working dir against non-parent
1052 # generate a pseudo-manifest for the working dir
1052 # generate a pseudo-manifest for the working dir
1053 mf2 = mfmatches(self['.'])
1053 mf2 = mfmatches(self['.'])
1054 for f in cmp + modified + added:
1054 for f in cmp + modified + added:
1055 mf2[f] = None
1055 mf2[f] = None
1056 mf2.set(f, ctx2.flags(f))
1056 mf2.set(f, ctx2.flags(f))
1057 for f in removed:
1057 for f in removed:
1058 if f in mf2:
1058 if f in mf2:
1059 del mf2[f]
1059 del mf2[f]
1060 else:
1060 else:
1061 # we are comparing two revisions
1061 # we are comparing two revisions
1062 deleted, unknown, ignored = [], [], []
1062 deleted, unknown, ignored = [], [], []
1063 mf2 = mfmatches(ctx2)
1063 mf2 = mfmatches(ctx2)
1064
1064
1065 modified, added, clean = [], [], []
1065 modified, added, clean = [], [], []
1066 for fn in mf2:
1066 for fn in mf2:
1067 if fn in mf1:
1067 if fn in mf1:
1068 if (mf1.flags(fn) != mf2.flags(fn) or
1068 if (mf1.flags(fn) != mf2.flags(fn) or
1069 (mf1[fn] != mf2[fn] and
1069 (mf1[fn] != mf2[fn] and
1070 (mf2[fn] or ctx1[fn].cmp(ctx2[fn].data())))):
1070 (mf2[fn] or ctx1[fn].cmp(ctx2[fn].data())))):
1071 modified.append(fn)
1071 modified.append(fn)
1072 elif listclean:
1072 elif listclean:
1073 clean.append(fn)
1073 clean.append(fn)
1074 del mf1[fn]
1074 del mf1[fn]
1075 else:
1075 else:
1076 added.append(fn)
1076 added.append(fn)
1077 removed = mf1.keys()
1077 removed = mf1.keys()
1078
1078
1079 r = modified, added, removed, deleted, unknown, ignored, clean
1079 r = modified, added, removed, deleted, unknown, ignored, clean
1080 [l.sort() for l in r]
1080 [l.sort() for l in r]
1081 return r
1081 return r
1082
1082
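# A minimal usage sketch (hypothetical): unpacking the 7-tuple returned by
# status().  Every list comes back sorted, per the final sort above.  The
# repository path is a placeholder.
from mercurial import hg, ui as uimod

repo = hg.repository(uimod.ui(), '/path/to/repo')
modified, added, removed, deleted, unknown, ignored, clean = \
    repo.status(node1='.', node2=None, unknown=True, clean=True)
for f in modified:
    print 'M', f
for f in added:
    print 'A', f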
1083 def add(self, list):
1083 def add(self, list):
1084 wlock = self.wlock()
1084 wlock = self.wlock()
1085 try:
1085 try:
1086 rejected = []
1086 rejected = []
1087 for f in list:
1087 for f in list:
1088 p = self.wjoin(f)
1088 p = self.wjoin(f)
1089 try:
1089 try:
1090 st = os.lstat(p)
1090 st = os.lstat(p)
1091 except:
1091 except:
1092 self.ui.warn(_("%s does not exist!\n") % f)
1092 self.ui.warn(_("%s does not exist!\n") % f)
1093 rejected.append(f)
1093 rejected.append(f)
1094 continue
1094 continue
1095 if st.st_size > 10000000:
1095 if st.st_size > 10000000:
1096 self.ui.warn(_("%s: files over 10MB may cause memory and"
1096 self.ui.warn(_("%s: files over 10MB may cause memory and"
1097 " performance problems\n"
1097 " performance problems\n"
1098 "(use 'hg revert %s' to unadd the file)\n")
1098 "(use 'hg revert %s' to unadd the file)\n")
1099 % (f, f))
1099 % (f, f))
1100 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1100 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1101 self.ui.warn(_("%s not added: only files and symlinks "
1101 self.ui.warn(_("%s not added: only files and symlinks "
1102 "supported currently\n") % f)
1102 "supported currently\n") % f)
1103 rejected.append(f)
1103 rejected.append(f)
1104 elif self.dirstate[f] in 'amn':
1104 elif self.dirstate[f] in 'amn':
1105 self.ui.warn(_("%s already tracked!\n") % f)
1105 self.ui.warn(_("%s already tracked!\n") % f)
1106 elif self.dirstate[f] == 'r':
1106 elif self.dirstate[f] == 'r':
1107 self.dirstate.normallookup(f)
1107 self.dirstate.normallookup(f)
1108 else:
1108 else:
1109 self.dirstate.add(f)
1109 self.dirstate.add(f)
1110 return rejected
1110 return rejected
1111 finally:
1111 finally:
1112 wlock.release()
1112 wlock.release()
1113
1113
1114 def forget(self, list):
1114 def forget(self, list):
1115 wlock = self.wlock()
1115 wlock = self.wlock()
1116 try:
1116 try:
1117 for f in list:
1117 for f in list:
1118 if self.dirstate[f] != 'a':
1118 if self.dirstate[f] != 'a':
1119 self.ui.warn(_("%s not added!\n") % f)
1119 self.ui.warn(_("%s not added!\n") % f)
1120 else:
1120 else:
1121 self.dirstate.forget(f)
1121 self.dirstate.forget(f)
1122 finally:
1122 finally:
1123 wlock.release()
1123 wlock.release()
1124
1124
1125 def remove(self, list, unlink=False):
1125 def remove(self, list, unlink=False):
1126 if unlink:
1126 if unlink:
1127 for f in list:
1127 for f in list:
1128 try:
1128 try:
1129 util.unlink(self.wjoin(f))
1129 util.unlink(self.wjoin(f))
1130 except OSError, inst:
1130 except OSError, inst:
1131 if inst.errno != errno.ENOENT:
1131 if inst.errno != errno.ENOENT:
1132 raise
1132 raise
1133 wlock = self.wlock()
1133 wlock = self.wlock()
1134 try:
1134 try:
1135 for f in list:
1135 for f in list:
1136 if unlink and os.path.exists(self.wjoin(f)):
1136 if unlink and os.path.exists(self.wjoin(f)):
1137 self.ui.warn(_("%s still exists!\n") % f)
1137 self.ui.warn(_("%s still exists!\n") % f)
1138 elif self.dirstate[f] == 'a':
1138 elif self.dirstate[f] == 'a':
1139 self.dirstate.forget(f)
1139 self.dirstate.forget(f)
1140 elif f not in self.dirstate:
1140 elif f not in self.dirstate:
1141 self.ui.warn(_("%s not tracked!\n") % f)
1141 self.ui.warn(_("%s not tracked!\n") % f)
1142 else:
1142 else:
1143 self.dirstate.remove(f)
1143 self.dirstate.remove(f)
1144 finally:
1144 finally:
1145 wlock.release()
1145 wlock.release()
1146
1146
1147 def undelete(self, list):
1147 def undelete(self, list):
1148 manifests = [self.manifest.read(self.changelog.read(p)[0])
1148 manifests = [self.manifest.read(self.changelog.read(p)[0])
1149 for p in self.dirstate.parents() if p != nullid]
1149 for p in self.dirstate.parents() if p != nullid]
1150 wlock = self.wlock()
1150 wlock = self.wlock()
1151 try:
1151 try:
1152 for f in list:
1152 for f in list:
1153 if self.dirstate[f] != 'r':
1153 if self.dirstate[f] != 'r':
1154 self.ui.warn(_("%s not removed!\n") % f)
1154 self.ui.warn(_("%s not removed!\n") % f)
1155 else:
1155 else:
1156 m = f in manifests[0] and manifests[0] or manifests[1]
1156 m = f in manifests[0] and manifests[0] or manifests[1]
1157 t = self.file(f).read(m[f])
1157 t = self.file(f).read(m[f])
1158 self.wwrite(f, t, m.flags(f))
1158 self.wwrite(f, t, m.flags(f))
1159 self.dirstate.normal(f)
1159 self.dirstate.normal(f)
1160 finally:
1160 finally:
1161 wlock.release()
1161 wlock.release()
1162
1162
1163 def copy(self, source, dest):
1163 def copy(self, source, dest):
1164 p = self.wjoin(dest)
1164 p = self.wjoin(dest)
1165 if not (os.path.exists(p) or os.path.islink(p)):
1165 if not (os.path.exists(p) or os.path.islink(p)):
1166 self.ui.warn(_("%s does not exist!\n") % dest)
1166 self.ui.warn(_("%s does not exist!\n") % dest)
1167 elif not (os.path.isfile(p) or os.path.islink(p)):
1167 elif not (os.path.isfile(p) or os.path.islink(p)):
1168 self.ui.warn(_("copy failed: %s is not a file or a "
1168 self.ui.warn(_("copy failed: %s is not a file or a "
1169 "symbolic link\n") % dest)
1169 "symbolic link\n") % dest)
1170 else:
1170 else:
1171 wlock = self.wlock()
1171 wlock = self.wlock()
1172 try:
1172 try:
1173 if self.dirstate[dest] in '?r':
1173 if self.dirstate[dest] in '?r':
1174 self.dirstate.add(dest)
1174 self.dirstate.add(dest)
1175 self.dirstate.copy(source, dest)
1175 self.dirstate.copy(source, dest)
1176 finally:
1176 finally:
1177 wlock.release()
1177 wlock.release()
1178
1178
1179 def heads(self, start=None):
1179 def heads(self, start=None):
1180 heads = self.changelog.heads(start)
1180 heads = self.changelog.heads(start)
1181 # sort the output in rev descending order
1181 # sort the output in rev descending order
1182 heads = [(-self.changelog.rev(h), h) for h in heads]
1182 heads = [(-self.changelog.rev(h), h) for h in heads]
1183 return [n for (r, n) in sorted(heads)]
1183 return [n for (r, n) in sorted(heads)]
1184
1184
1185 def branchheads(self, branch=None, start=None, closed=False):
1185 def branchheads(self, branch=None, start=None, closed=False):
1186 '''return a (possibly filtered) list of heads for the given branch
1187
1188 Heads are returned in topological order, from newest to oldest.
1189 If branch is None, use the dirstate branch.
1190 If start is not None, return only heads reachable from start.
1191 If closed is True, return heads that are marked as closed as well.
1192 '''
1186 if branch is None:
1193 if branch is None:
1187 branch = self[None].branch()
1194 branch = self[None].branch()
1188 branches = self.branchmap()
1195 branches = self.branchmap()
1189 if branch not in branches:
1196 if branch not in branches:
1190 return []
1197 return []
1191 bheads = branches[branch]
1192 # the cache returns heads ordered lowest to highest
1198 # the cache returns heads ordered lowest to highest
1193 bheads.reverse()
1199 bheads = list(reversed(branches[branch]))
1194 if start is not None:
1200 if start is not None:
1195 # filter out the heads that cannot be reached from startrev
1201 # filter out the heads that cannot be reached from startrev
1196 bheads = self.changelog.nodesbetween([start], bheads)[2]
1202 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1203 bheads = [h for h in bheads if h in fbheads]
1197 if not closed:
1204 if not closed:
1198 bheads = [h for h in bheads if
1205 bheads = [h for h in bheads if
1199 ('close' not in self.changelog.read(h)[5])]
1206 ('close' not in self.changelog.read(h)[5])]
1200 return bheads
1207 return bheads
1201
1208
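# A minimal usage sketch (hypothetical): listing the open heads of one named
# branch, newest first, as described by the docstring added in this change.
# Branch name and repository path are placeholders.
from mercurial import hg, ui as uimod
from mercurial.node import short

repo = hg.repository(uimod.ui(), '/path/to/repo')
for h in repo.branchheads('default', closed=False):
    print '%d:%s' % (repo.changelog.rev(h), short(h))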
1202 def branches(self, nodes):
1209 def branches(self, nodes):
1203 if not nodes:
1210 if not nodes:
1204 nodes = [self.changelog.tip()]
1211 nodes = [self.changelog.tip()]
1205 b = []
1212 b = []
1206 for n in nodes:
1213 for n in nodes:
1207 t = n
1214 t = n
1208 while 1:
1215 while 1:
1209 p = self.changelog.parents(n)
1216 p = self.changelog.parents(n)
1210 if p[1] != nullid or p[0] == nullid:
1217 if p[1] != nullid or p[0] == nullid:
1211 b.append((t, n, p[0], p[1]))
1218 b.append((t, n, p[0], p[1]))
1212 break
1219 break
1213 n = p[0]
1220 n = p[0]
1214 return b
1221 return b
1215
1222
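# Illustrative note (hypothetical names): each entry returned by branches()
# describes one linear run of history as a 4-tuple.  Walking first parents
# from the node asked about, it stops at the first merge or root changeset:
#
#     head, seg_end, p1, p2 = repo.branches([some_node])[0]
#
# where 'head' is the node passed in, 'seg_end' is the changeset where the
# walk stopped, and p1/p2 are its parents ('some_node' is a placeholder).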
1216 def between(self, pairs):
1223 def between(self, pairs):
1217 r = []
1224 r = []
1218
1225
1219 for top, bottom in pairs:
1226 for top, bottom in pairs:
1220 n, l, i = top, [], 0
1227 n, l, i = top, [], 0
1221 f = 1
1228 f = 1
1222
1229
1223 while n != bottom and n != nullid:
1230 while n != bottom and n != nullid:
1224 p = self.changelog.parents(n)[0]
1231 p = self.changelog.parents(n)[0]
1225 if i == f:
1232 if i == f:
1226 l.append(n)
1233 l.append(n)
1227 f = f * 2
1234 f = f * 2
1228 n = p
1235 n = p
1229 i += 1
1236 i += 1
1230
1237
1231 r.append(l)
1238 r.append(l)
1232
1239
1233 return r
1240 return r
1234
1241
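# A standalone sketch (hypothetical, plain integers standing in for changelog
# nodes) of the sampling pattern used by between() above: nodes are recorded
# at distances 1, 2, 4, 8, ... from 'top', keeping the number of probe points
# per segment roughly logarithmic during discovery.
def sample_distances(segment_length):
    picked, f, i = [], 1, 0
    while i < segment_length:
        if i == f:               # same test as 'if i == f' in between()
            picked.append(i)
            f *= 2
        i += 1
    return picked

# sample_distances(20) == [1, 2, 4, 8, 16]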
1235 def findincoming(self, remote, base=None, heads=None, force=False):
1242 def findincoming(self, remote, base=None, heads=None, force=False):
1236 """Return list of roots of the subsets of missing nodes from remote
1243 """Return list of roots of the subsets of missing nodes from remote
1237
1244
1238 If base dict is specified, assume that these nodes and their parents
1245 If base dict is specified, assume that these nodes and their parents
1239 exist on the remote side and that no child of a node of base exists
1246 exist on the remote side and that no child of a node of base exists
1240 in both remote and self.
1247 in both remote and self.
1241 Furthermore base will be updated to include the nodes that exist in
1248 Furthermore base will be updated to include the nodes that exist in
1242 self and remote but whose children do not exist in both self and remote.
1249 self and remote but whose children do not exist in both self and remote.
1243 If a list of heads is specified, return only nodes which are heads
1250 If a list of heads is specified, return only nodes which are heads
1244 or ancestors of these heads.
1251 or ancestors of these heads.
1245
1252
1246 All the ancestors of base are in self and in remote.
1253 All the ancestors of base are in self and in remote.
1247 All the descendants of the list returned are missing in self.
1254 All the descendants of the list returned are missing in self.
1248 (and so we know that the rest of the nodes are missing in remote, see
1255 (and so we know that the rest of the nodes are missing in remote, see
1249 outgoing)
1256 outgoing)
1250 """
1257 """
1251 return self.findcommonincoming(remote, base, heads, force)[1]
1258 return self.findcommonincoming(remote, base, heads, force)[1]
1252
1259
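# A minimal usage sketch (hypothetical): calling findcommonincoming() against
# another repository and reading its 3-tuple.  Paths are placeholders; 'other'
# only needs the methods used during discovery (heads, branches, between),
# which a local repository also provides.
from mercurial import hg, ui as uimod

u = uimod.ui()
repo = hg.repository(u, '/path/to/local')
other = hg.repository(u, '/path/to/remote')
common, fetch, rheads = repo.findcommonincoming(other)
# common: nodes known to be present on both sides
# fetch:  roots of the changesets missing locally (empty == nothing to pull)
# rheads: the remote heads that were examined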
1253 def findcommonincoming(self, remote, base=None, heads=None, force=False):
1260 def findcommonincoming(self, remote, base=None, heads=None, force=False):
1254 """Return a tuple (common, missing roots, heads) used to identify
1261 """Return a tuple (common, missing roots, heads) used to identify
1255 missing nodes from remote.
1262 missing nodes from remote.
1256
1263
1257 If base dict is specified, assume that these nodes and their parents
1264 If base dict is specified, assume that these nodes and their parents
1258 exist on the remote side and that no child of a node of base exists
1265 exist on the remote side and that no child of a node of base exists
1259 in both remote and self.
1266 in both remote and self.
1260 Furthermore base will be updated to include the nodes that exist in
1267 Furthermore base will be updated to include the nodes that exist in
1261 self and remote but whose children do not exist in both self and remote.
1268 self and remote but whose children do not exist in both self and remote.
1262 If a list of heads is specified, return only nodes which are heads
1269 If a list of heads is specified, return only nodes which are heads
1263 or ancestors of these heads.
1270 or ancestors of these heads.
1264
1271
1265 All the ancestors of base are in self and in remote.
1272 All the ancestors of base are in self and in remote.
1266 """
1273 """
1267 m = self.changelog.nodemap
1274 m = self.changelog.nodemap
1268 search = []
1275 search = []
1269 fetch = set()
1276 fetch = set()
1270 seen = set()
1277 seen = set()
1271 seenbranch = set()
1278 seenbranch = set()
1272 if base is None:
1279 if base is None:
1273 base = {}
1280 base = {}
1274
1281
1275 if not heads:
1282 if not heads:
1276 heads = remote.heads()
1283 heads = remote.heads()
1277
1284
1278 if self.changelog.tip() == nullid:
1285 if self.changelog.tip() == nullid:
1279 base[nullid] = 1
1286 base[nullid] = 1
1280 if heads != [nullid]:
1287 if heads != [nullid]:
1281 return [nullid], [nullid], list(heads)
1288 return [nullid], [nullid], list(heads)
1282 return [nullid], [], []
1289 return [nullid], [], []
1283
1290
1284 # assume we're closer to the tip than the root
1291 # assume we're closer to the tip than the root
1285 # and start by examining the heads
1292 # and start by examining the heads
1286 self.ui.status(_("searching for changes\n"))
1293 self.ui.status(_("searching for changes\n"))
1287
1294
1288 unknown = []
1295 unknown = []
1289 for h in heads:
1296 for h in heads:
1290 if h not in m:
1297 if h not in m:
1291 unknown.append(h)
1298 unknown.append(h)
1292 else:
1299 else:
1293 base[h] = 1
1300 base[h] = 1
1294
1301
1295 heads = unknown
1302 heads = unknown
1296 if not unknown:
1303 if not unknown:
1297 return base.keys(), [], []
1304 return base.keys(), [], []
1298
1305
1299 req = set(unknown)
1306 req = set(unknown)
1300 reqcnt = 0
1307 reqcnt = 0
1301
1308
1302 # search through remote branches
1309 # search through remote branches
1303 # a 'branch' here is a linear segment of history, with four parts:
1310 # a 'branch' here is a linear segment of history, with four parts:
1304 # head, root, first parent, second parent
1311 # head, root, first parent, second parent
1305 # (a branch always has two parents (or none) by definition)
1312 # (a branch always has two parents (or none) by definition)
1306 unknown = remote.branches(unknown)
1313 unknown = remote.branches(unknown)
1307 while unknown:
1314 while unknown:
1308 r = []
1315 r = []
1309 while unknown:
1316 while unknown:
1310 n = unknown.pop(0)
1317 n = unknown.pop(0)
1311 if n[0] in seen:
1318 if n[0] in seen:
1312 continue
1319 continue
1313
1320
1314 self.ui.debug(_("examining %s:%s\n")
1321 self.ui.debug(_("examining %s:%s\n")
1315 % (short(n[0]), short(n[1])))
1322 % (short(n[0]), short(n[1])))
1316 if n[0] == nullid: # found the end of the branch
1323 if n[0] == nullid: # found the end of the branch
1317 pass
1324 pass
1318 elif n in seenbranch:
1325 elif n in seenbranch:
1319 self.ui.debug(_("branch already found\n"))
1326 self.ui.debug(_("branch already found\n"))
1320 continue
1327 continue
1321 elif n[1] and n[1] in m: # do we know the base?
1328 elif n[1] and n[1] in m: # do we know the base?
1322 self.ui.debug(_("found incomplete branch %s:%s\n")
1329 self.ui.debug(_("found incomplete branch %s:%s\n")
1323 % (short(n[0]), short(n[1])))
1330 % (short(n[0]), short(n[1])))
1324 search.append(n[0:2]) # schedule branch range for scanning
1331 search.append(n[0:2]) # schedule branch range for scanning
1325 seenbranch.add(n)
1332 seenbranch.add(n)
1326 else:
1333 else:
1327 if n[1] not in seen and n[1] not in fetch:
1334 if n[1] not in seen and n[1] not in fetch:
1328 if n[2] in m and n[3] in m:
1335 if n[2] in m and n[3] in m:
1329 self.ui.debug(_("found new changeset %s\n") %
1336 self.ui.debug(_("found new changeset %s\n") %
1330 short(n[1]))
1337 short(n[1]))
1331 fetch.add(n[1]) # earliest unknown
1338 fetch.add(n[1]) # earliest unknown
1332 for p in n[2:4]:
1339 for p in n[2:4]:
1333 if p in m:
1340 if p in m:
1334 base[p] = 1 # latest known
1341 base[p] = 1 # latest known
1335
1342
1336 for p in n[2:4]:
1343 for p in n[2:4]:
1337 if p not in req and p not in m:
1344 if p not in req and p not in m:
1338 r.append(p)
1345 r.append(p)
1339 req.add(p)
1346 req.add(p)
1340 seen.add(n[0])
1347 seen.add(n[0])
1341
1348
1342 if r:
1349 if r:
1343 reqcnt += 1
1350 reqcnt += 1
1344 self.ui.debug(_("request %d: %s\n") %
1351 self.ui.debug(_("request %d: %s\n") %
1345 (reqcnt, " ".join(map(short, r))))
1352 (reqcnt, " ".join(map(short, r))))
1346 for p in xrange(0, len(r), 10):
1353 for p in xrange(0, len(r), 10):
1347 for b in remote.branches(r[p:p+10]):
1354 for b in remote.branches(r[p:p+10]):
1348 self.ui.debug(_("received %s:%s\n") %
1355 self.ui.debug(_("received %s:%s\n") %
1349 (short(b[0]), short(b[1])))
1356 (short(b[0]), short(b[1])))
1350 unknown.append(b)
1357 unknown.append(b)
1351
1358
1352 # do binary search on the branches we found
1359 # do binary search on the branches we found
1353 while search:
1360 while search:
1354 newsearch = []
1361 newsearch = []
1355 reqcnt += 1
1362 reqcnt += 1
1356 for n, l in zip(search, remote.between(search)):
1363 for n, l in zip(search, remote.between(search)):
1357 l.append(n[1])
1364 l.append(n[1])
1358 p = n[0]
1365 p = n[0]
1359 f = 1
1366 f = 1
1360 for i in l:
1367 for i in l:
1361 self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
1368 self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
1362 if i in m:
1369 if i in m:
1363 if f <= 2:
1370 if f <= 2:
1364 self.ui.debug(_("found new branch changeset %s\n") %
1371 self.ui.debug(_("found new branch changeset %s\n") %
1365 short(p))
1372 short(p))
1366 fetch.add(p)
1373 fetch.add(p)
1367 base[i] = 1
1374 base[i] = 1
1368 else:
1375 else:
1369 self.ui.debug(_("narrowed branch search to %s:%s\n")
1376 self.ui.debug(_("narrowed branch search to %s:%s\n")
1370 % (short(p), short(i)))
1377 % (short(p), short(i)))
1371 newsearch.append((p, i))
1378 newsearch.append((p, i))
1372 break
1379 break
1373 p, f = i, f * 2
1380 p, f = i, f * 2
1374 search = newsearch
1381 search = newsearch
1375
1382
1376 # sanity check our fetch list
1383 # sanity check our fetch list
1377 for f in fetch:
1384 for f in fetch:
1378 if f in m:
1385 if f in m:
1379 raise error.RepoError(_("already have changeset ")
1386 raise error.RepoError(_("already have changeset ")
1380 + short(f[:4]))
1387 + short(f[:4]))
1381
1388
1382 if base.keys() == [nullid]:
1389 if base.keys() == [nullid]:
1383 if force:
1390 if force:
1384 self.ui.warn(_("warning: repository is unrelated\n"))
1391 self.ui.warn(_("warning: repository is unrelated\n"))
1385 else:
1392 else:
1386 raise util.Abort(_("repository is unrelated"))
1393 raise util.Abort(_("repository is unrelated"))
1387
1394
1388 self.ui.debug(_("found new changesets starting at ") +
1395 self.ui.debug(_("found new changesets starting at ") +
1389 " ".join([short(f) for f in fetch]) + "\n")
1396 " ".join([short(f) for f in fetch]) + "\n")
1390
1397
1391 self.ui.debug(_("%d total queries\n") % reqcnt)
1398 self.ui.debug(_("%d total queries\n") % reqcnt)
1392
1399
1393 return base.keys(), list(fetch), heads
1400 return base.keys(), list(fetch), heads
1394
1401
1395 def findoutgoing(self, remote, base=None, heads=None, force=False):
1402 def findoutgoing(self, remote, base=None, heads=None, force=False):
1396 """Return list of nodes that are roots of subsets not in remote
1403 """Return list of nodes that are roots of subsets not in remote
1397
1404
1398 If base dict is specified, assume that these nodes and their parents
1405 If base dict is specified, assume that these nodes and their parents
1399 exist on the remote side.
1406 exist on the remote side.
1400 If a list of heads is specified, return only nodes which are heads
1407 If a list of heads is specified, return only nodes which are heads
1401 or ancestors of these heads, and return a second element which
1408 or ancestors of these heads, and return a second element which
1402 contains all remote heads which get new children.
1409 contains all remote heads which get new children.
1403 """
1410 """
1404 if base is None:
1411 if base is None:
1405 base = {}
1412 base = {}
1406 self.findincoming(remote, base, heads, force=force)
1413 self.findincoming(remote, base, heads, force=force)
1407
1414
1408 self.ui.debug(_("common changesets up to ")
1415 self.ui.debug(_("common changesets up to ")
1409 + " ".join(map(short, base.keys())) + "\n")
1416 + " ".join(map(short, base.keys())) + "\n")
1410
1417
1411 remain = set(self.changelog.nodemap)
1418 remain = set(self.changelog.nodemap)
1412
1419
1413 # prune everything remote has from the tree
1420 # prune everything remote has from the tree
1414 remain.remove(nullid)
1421 remain.remove(nullid)
1415 remove = base.keys()
1422 remove = base.keys()
1416 while remove:
1423 while remove:
1417 n = remove.pop(0)
1424 n = remove.pop(0)
1418 if n in remain:
1425 if n in remain:
1419 remain.remove(n)
1426 remain.remove(n)
1420 for p in self.changelog.parents(n):
1427 for p in self.changelog.parents(n):
1421 remove.append(p)
1428 remove.append(p)
1422
1429
1423 # find every node whose parents have been pruned
1430 # find every node whose parents have been pruned
1424 subset = []
1431 subset = []
1425 # find every remote head that will get new children
1432 # find every remote head that will get new children
1426 updated_heads = set()
1433 updated_heads = set()
1427 for n in remain:
1434 for n in remain:
1428 p1, p2 = self.changelog.parents(n)
1435 p1, p2 = self.changelog.parents(n)
1429 if p1 not in remain and p2 not in remain:
1436 if p1 not in remain and p2 not in remain:
1430 subset.append(n)
1437 subset.append(n)
1431 if heads:
1438 if heads:
1432 if p1 in heads:
1439 if p1 in heads:
1433 updated_heads.add(p1)
1440 updated_heads.add(p1)
1434 if p2 in heads:
1441 if p2 in heads:
1435 updated_heads.add(p2)
1442 updated_heads.add(p2)
1436
1443
1437 # this is the set of all roots we have to push
1444 # this is the set of all roots we have to push
1438 if heads:
1445 if heads:
1439 return subset, list(updated_heads)
1446 return subset, list(updated_heads)
1440 else:
1447 else:
1441 return subset
1448 return subset
1442
1449
1443 def pull(self, remote, heads=None, force=False):
1450 def pull(self, remote, heads=None, force=False):
1444 lock = self.lock()
1451 lock = self.lock()
1445 try:
1452 try:
1446 common, fetch, rheads = self.findcommonincoming(remote, heads=heads,
1453 common, fetch, rheads = self.findcommonincoming(remote, heads=heads,
1447 force=force)
1454 force=force)
1448 if fetch == [nullid]:
1455 if fetch == [nullid]:
1449 self.ui.status(_("requesting all changes\n"))
1456 self.ui.status(_("requesting all changes\n"))
1450
1457
1451 if not fetch:
1458 if not fetch:
1452 self.ui.status(_("no changes found\n"))
1459 self.ui.status(_("no changes found\n"))
1453 return 0
1460 return 0
1454
1461
1455 if heads is None and remote.capable('changegroupsubset'):
1462 if heads is None and remote.capable('changegroupsubset'):
1456 heads = rheads
1463 heads = rheads
1457
1464
1458 if heads is None:
1465 if heads is None:
1459 cg = remote.changegroup(fetch, 'pull')
1466 cg = remote.changegroup(fetch, 'pull')
1460 else:
1467 else:
1461 if not remote.capable('changegroupsubset'):
1468 if not remote.capable('changegroupsubset'):
1462 raise util.Abort(_("Partial pull cannot be done because "
1469 raise util.Abort(_("Partial pull cannot be done because "
1463 "other repository doesn't support "
1470 "other repository doesn't support "
1464 "changegroupsubset."))
1471 "changegroupsubset."))
1465 cg = remote.changegroupsubset(fetch, heads, 'pull')
1472 cg = remote.changegroupsubset(fetch, heads, 'pull')
1466 return self.addchangegroup(cg, 'pull', remote.url())
1473 return self.addchangegroup(cg, 'pull', remote.url())
1467 finally:
1474 finally:
1468 lock.release()
1475 lock.release()
1469
1476
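# A minimal usage sketch (hypothetical): a whole-repository pull between two
# local repositories.  As the code above shows, pull() returns 0 when no
# changes are found and otherwise the result of addchangegroup().  Paths are
# placeholders.
from mercurial import hg, ui as uimod

u = uimod.ui()
repo = hg.repository(u, '/path/to/local')
other = hg.repository(u, '/path/to/remote')
result = repo.pull(other, heads=None, force=False)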
1470 def push(self, remote, force=False, revs=None):
1477 def push(self, remote, force=False, revs=None):
1471 # there are two ways to push to remote repo:
1478 # there are two ways to push to remote repo:
1472 #
1479 #
1473 # addchangegroup assumes local user can lock remote
1480 # addchangegroup assumes local user can lock remote
1474 # repo (local filesystem, old ssh servers).
1481 # repo (local filesystem, old ssh servers).
1475 #
1482 #
1476 # unbundle assumes local user cannot lock remote repo (new ssh
1483 # unbundle assumes local user cannot lock remote repo (new ssh
1477 # servers, http servers).
1484 # servers, http servers).
1478
1485
1479 if remote.capable('unbundle'):
1486 if remote.capable('unbundle'):
1480 return self.push_unbundle(remote, force, revs)
1487 return self.push_unbundle(remote, force, revs)
1481 return self.push_addchangegroup(remote, force, revs)
1488 return self.push_addchangegroup(remote, force, revs)
1482
1489
1483 def prepush(self, remote, force, revs):
1490 def prepush(self, remote, force, revs):
1484 common = {}
1491 common = {}
1485 remote_heads = remote.heads()
1492 remote_heads = remote.heads()
1486 inc = self.findincoming(remote, common, remote_heads, force=force)
1493 inc = self.findincoming(remote, common, remote_heads, force=force)
1487
1494
1488 update, updated_heads = self.findoutgoing(remote, common, remote_heads)
1495 update, updated_heads = self.findoutgoing(remote, common, remote_heads)
1489 if revs is not None:
1496 if revs is not None:
1490 msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
1497 msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
1491 else:
1498 else:
1492 bases, heads = update, self.changelog.heads()
1499 bases, heads = update, self.changelog.heads()
1493
1500
1494 def checkbranch(lheads, rheads, updatelh):
1501 def checkbranch(lheads, rheads, updatelh):
1495 '''
1502 '''
1496 check whether there are more local heads than remote heads on
1503 check whether there are more local heads than remote heads on
1497 a specific branch.
1504 a specific branch.
1498
1505
1499 lheads: local branch heads
1506 lheads: local branch heads
1500 rheads: remote branch heads
1507 rheads: remote branch heads
1501 updatelh: outgoing local branch heads
1508 updatelh: outgoing local branch heads
1502 '''
1509 '''
1503
1510
1504 warn = 0
1511 warn = 0
1505
1512
1506 if not revs and len(lheads) > len(rheads):
1513 if not revs and len(lheads) > len(rheads):
1507 warn = 1
1514 warn = 1
1508 else:
1515 else:
1509 updatelheads = [self.changelog.heads(x, lheads)
1516 updatelheads = [self.changelog.heads(x, lheads)
1510 for x in updatelh]
1517 for x in updatelh]
1511 newheads = set(sum(updatelheads, [])) & set(lheads)
1518 newheads = set(sum(updatelheads, [])) & set(lheads)
1512
1519
1513 if not newheads:
1520 if not newheads:
1514 return True
1521 return True
1515
1522
1516 for r in rheads:
1523 for r in rheads:
1517 if r in self.changelog.nodemap:
1524 if r in self.changelog.nodemap:
1518 desc = self.changelog.heads(r, heads)
1525 desc = self.changelog.heads(r, heads)
1519 l = [h for h in heads if h in desc]
1526 l = [h for h in heads if h in desc]
1520 if not l:
1527 if not l:
1521 newheads.add(r)
1528 newheads.add(r)
1522 else:
1529 else:
1523 newheads.add(r)
1530 newheads.add(r)
1524 if len(newheads) > len(rheads):
1531 if len(newheads) > len(rheads):
1525 warn = 1
1532 warn = 1
1526
1533
1527 if warn:
1534 if warn:
1528 if not rheads: # new branch requires --force
1535 if not rheads: # new branch requires --force
1529 self.ui.warn(_("abort: push creates new"
1536 self.ui.warn(_("abort: push creates new"
1530 " remote branch '%s'!\n") %
1537 " remote branch '%s'!\n") %
1531 self[updatelh[0]].branch())
1538 self[updatelh[0]].branch())
1532 else:
1539 else:
1533 self.ui.warn(_("abort: push creates new remote heads!\n"))
1540 self.ui.warn(_("abort: push creates new remote heads!\n"))
1534
1541
1535 self.ui.status(_("(did you forget to merge?"
1542 self.ui.status(_("(did you forget to merge?"
1536 " use push -f to force)\n"))
1543 " use push -f to force)\n"))
1537 return False
1544 return False
1538 return True
1545 return True
1539
1546
1540 if not bases:
1547 if not bases:
1541 self.ui.status(_("no changes found\n"))
1548 self.ui.status(_("no changes found\n"))
1542 return None, 1
1549 return None, 1
1543 elif not force:
1550 elif not force:
1544 # Check for each named branch if we're creating new remote heads.
1551 # Check for each named branch if we're creating new remote heads.
1545 # To be a remote head after push, node must be either:
1552 # To be a remote head after push, node must be either:
1546 # - unknown locally
1553 # - unknown locally
1547 # - a local outgoing head descended from update
1554 # - a local outgoing head descended from update
1548 # - a remote head that's known locally and not
1555 # - a remote head that's known locally and not
1549 # ancestral to an outgoing head
1556 # ancestral to an outgoing head
1550 #
1557 #
1551 # New named branches cannot be created without --force.
1558 # New named branches cannot be created without --force.
1552
1559
1553 if remote_heads != [nullid]:
1560 if remote_heads != [nullid]:
1554 if remote.capable('branchmap'):
1561 if remote.capable('branchmap'):
1555 localhds = {}
1562 localhds = {}
1556 if not revs:
1563 if not revs:
1557 localhds = self.branchmap()
1564 localhds = self.branchmap()
1558 else:
1565 else:
1559 for n in heads:
1566 for n in heads:
1560 branch = self[n].branch()
1567 branch = self[n].branch()
1561 if branch in localhds:
1568 if branch in localhds:
1562 localhds[branch].append(n)
1569 localhds[branch].append(n)
1563 else:
1570 else:
1564 localhds[branch] = [n]
1571 localhds[branch] = [n]
1565
1572
1566 remotehds = remote.branchmap()
1573 remotehds = remote.branchmap()
1567
1574
1568 for lh in localhds:
1575 for lh in localhds:
1569 if lh in remotehds:
1576 if lh in remotehds:
1570 rheads = remotehds[lh]
1577 rheads = remotehds[lh]
1571 else:
1578 else:
1572 rheads = []
1579 rheads = []
1573 lheads = localhds[lh]
1580 lheads = localhds[lh]
1574 updatelh = [upd for upd in update
1581 updatelh = [upd for upd in update
1575 if self[upd].branch() == lh]
1582 if self[upd].branch() == lh]
1576 if not updatelh:
1583 if not updatelh:
1577 continue
1584 continue
1578 if not checkbranch(lheads, rheads, updatelh):
1585 if not checkbranch(lheads, rheads, updatelh):
1579 return None, 0
1586 return None, 0
1580 else:
1587 else:
1581 if not checkbranch(heads, remote_heads, update):
1588 if not checkbranch(heads, remote_heads, update):
1582 return None, 0
1589 return None, 0
1583
1590
1584 if inc:
1591 if inc:
1585 self.ui.warn(_("note: unsynced remote changes!\n"))
1592 self.ui.warn(_("note: unsynced remote changes!\n"))
1586
1593
1587
1594
1588 if revs is None:
1595 if revs is None:
1589 # use the fast path, no race possible on push
1596 # use the fast path, no race possible on push
1590 cg = self._changegroup(common.keys(), 'push')
1597 cg = self._changegroup(common.keys(), 'push')
1591 else:
1598 else:
1592 cg = self.changegroupsubset(update, revs, 'push')
1599 cg = self.changegroupsubset(update, revs, 'push')
1593 return cg, remote_heads
1600 return cg, remote_heads
1594
1601
1595 def push_addchangegroup(self, remote, force, revs):
1602 def push_addchangegroup(self, remote, force, revs):
1596 lock = remote.lock()
1603 lock = remote.lock()
1597 try:
1604 try:
1598 ret = self.prepush(remote, force, revs)
1605 ret = self.prepush(remote, force, revs)
1599 if ret[0] is not None:
1606 if ret[0] is not None:
1600 cg, remote_heads = ret
1607 cg, remote_heads = ret
1601 return remote.addchangegroup(cg, 'push', self.url())
1608 return remote.addchangegroup(cg, 'push', self.url())
1602 return ret[1]
1609 return ret[1]
1603 finally:
1610 finally:
1604 lock.release()
1611 lock.release()
1605
1612
1606 def push_unbundle(self, remote, force, revs):
1613 def push_unbundle(self, remote, force, revs):
1607 # local repo finds heads on server, finds out what revs it
1614 # local repo finds heads on server, finds out what revs it
1608 # must push. once revs transferred, if server finds it has
1615 # must push. once revs transferred, if server finds it has
1609 # different heads (someone else won commit/push race), server
1616 # different heads (someone else won commit/push race), server
1610 # aborts.
1617 # aborts.
1611
1618
1612 ret = self.prepush(remote, force, revs)
1619 ret = self.prepush(remote, force, revs)
1613 if ret[0] is not None:
1620 if ret[0] is not None:
1614 cg, remote_heads = ret
1621 cg, remote_heads = ret
1615 if force: remote_heads = ['force']
1622 if force: remote_heads = ['force']
1616 return remote.unbundle(cg, remote_heads, 'push')
1623 return remote.unbundle(cg, remote_heads, 'push')
1617 return ret[1]
1624 return ret[1]
1618
1625
1619 def changegroupinfo(self, nodes, source):
1626 def changegroupinfo(self, nodes, source):
1620 if self.ui.verbose or source == 'bundle':
1627 if self.ui.verbose or source == 'bundle':
1621 self.ui.status(_("%d changesets found\n") % len(nodes))
1628 self.ui.status(_("%d changesets found\n") % len(nodes))
1622 if self.ui.debugflag:
1629 if self.ui.debugflag:
1623 self.ui.debug(_("list of changesets:\n"))
1630 self.ui.debug(_("list of changesets:\n"))
1624 for node in nodes:
1631 for node in nodes:
1625 self.ui.debug("%s\n" % hex(node))
1632 self.ui.debug("%s\n" % hex(node))
1626
1633
1627 def changegroupsubset(self, bases, heads, source, extranodes=None):
1634 def changegroupsubset(self, bases, heads, source, extranodes=None):
1628 """This function generates a changegroup consisting of all the nodes
1635 """This function generates a changegroup consisting of all the nodes
1629 that are descendants of any of the bases, and ancestors of any of
1636 that are descendants of any of the bases, and ancestors of any of
1630 the heads.
1637 the heads.
1631
1638
1632 It is fairly complex as determining which filenodes and which
1639 It is fairly complex as determining which filenodes and which
1633 manifest nodes need to be included for the changeset to be complete
1640 manifest nodes need to be included for the changeset to be complete
1634 is non-trivial.
1641 is non-trivial.
1635
1642
1636 Another wrinkle is doing the reverse, figuring out which changeset in
1643 Another wrinkle is doing the reverse, figuring out which changeset in
1637 the changegroup a particular filenode or manifestnode belongs to.
1644 the changegroup a particular filenode or manifestnode belongs to.
1638
1645
1639 The caller can specify some nodes that must be included in the
1646 The caller can specify some nodes that must be included in the
1640 changegroup using the extranodes argument. It should be a dict
1647 changegroup using the extranodes argument. It should be a dict
1641 where the keys are the filenames (or 1 for the manifest), and the
1648 where the keys are the filenames (or 1 for the manifest), and the
1642 values are lists of (node, linknode) tuples, where node is a wanted
1649 values are lists of (node, linknode) tuples, where node is a wanted
1643 node and linknode is the changelog node that should be transmitted as
1650 node and linknode is the changelog node that should be transmitted as
1644 the linkrev.
1651 the linkrev.
1645 """
1652 """
1646
1653
1647 if extranodes is None:
1654 if extranodes is None:
1648 # can we go through the fast path ?
1655 # can we go through the fast path ?
1649 heads.sort()
1656 heads.sort()
1650 allheads = self.heads()
1657 allheads = self.heads()
1651 allheads.sort()
1658 allheads.sort()
1652 if heads == allheads:
1659 if heads == allheads:
1653 common = []
1660 common = []
1654 # parents of bases are known from both sides
1661 # parents of bases are known from both sides
1655 for n in bases:
1662 for n in bases:
1656 for p in self.changelog.parents(n):
1663 for p in self.changelog.parents(n):
1657 if p != nullid:
1664 if p != nullid:
1658 common.append(p)
1665 common.append(p)
1659 return self._changegroup(common, source)
1666 return self._changegroup(common, source)
1660
1667
1661 self.hook('preoutgoing', throw=True, source=source)
1668 self.hook('preoutgoing', throw=True, source=source)
1662
1669
1663 # Set up some initial variables
1670 # Set up some initial variables
1664 # Make it easy to refer to self.changelog
1671 # Make it easy to refer to self.changelog
1665 cl = self.changelog
1672 cl = self.changelog
1666 # msng is short for missing - compute the list of changesets in this
1673 # msng is short for missing - compute the list of changesets in this
1667 # changegroup.
1674 # changegroup.
1668 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1675 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1669 self.changegroupinfo(msng_cl_lst, source)
1676 self.changegroupinfo(msng_cl_lst, source)
1670 # Some bases may turn out to be superfluous, and some heads may be
1677 # Some bases may turn out to be superfluous, and some heads may be
1671 # too. nodesbetween will return the minimal set of bases and heads
1678 # too. nodesbetween will return the minimal set of bases and heads
1672 # necessary to re-create the changegroup.
1679 # necessary to re-create the changegroup.
1673
1680
1674 # Known heads are the list of heads that it is assumed the recipient
1681 # Known heads are the list of heads that it is assumed the recipient
1675 # of this changegroup will know about.
1682 # of this changegroup will know about.
1676 knownheads = set()
1683 knownheads = set()
1677 # We assume that all parents of bases are known heads.
1684 # We assume that all parents of bases are known heads.
1678 for n in bases:
1685 for n in bases:
1679 knownheads.update(cl.parents(n))
1686 knownheads.update(cl.parents(n))
1680 knownheads.discard(nullid)
1687 knownheads.discard(nullid)
1681 knownheads = list(knownheads)
1688 knownheads = list(knownheads)
1682 if knownheads:
1689 if knownheads:
1683 # Now that we know what heads are known, we can compute which
1690 # Now that we know what heads are known, we can compute which
1684 # changesets are known. The recipient must know about all
1691 # changesets are known. The recipient must know about all
1685 # changesets required to reach the known heads from the null
1692 # changesets required to reach the known heads from the null
1686 # changeset.
1693 # changeset.
1687 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1694 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1688 junk = None
1695 junk = None
1689 # Transform the list into a set.
1696 # Transform the list into a set.
1690 has_cl_set = set(has_cl_set)
1697 has_cl_set = set(has_cl_set)
1691 else:
1698 else:
1692 # If there were no known heads, the recipient cannot be assumed to
1699 # If there were no known heads, the recipient cannot be assumed to
1693 # know about any changesets.
1700 # know about any changesets.
1694 has_cl_set = set()
1701 has_cl_set = set()
1695
1702
1696 # Make it easy to refer to self.manifest
1703 # Make it easy to refer to self.manifest
1697 mnfst = self.manifest
1704 mnfst = self.manifest
1698 # We don't know which manifests are missing yet
1705 # We don't know which manifests are missing yet
1699 msng_mnfst_set = {}
1706 msng_mnfst_set = {}
1700 # Nor do we know which filenodes are missing.
1707 # Nor do we know which filenodes are missing.
1701 msng_filenode_set = {}
1708 msng_filenode_set = {}
1702
1709
1703 junk = mnfst.index[len(mnfst) - 1] # Get around a bug in lazyindex
1710 junk = mnfst.index[len(mnfst) - 1] # Get around a bug in lazyindex
1704 junk = None
1711 junk = None
1705
1712
1706 # A changeset always belongs to itself, so the changenode lookup
1713 # A changeset always belongs to itself, so the changenode lookup
1707 # function for a changenode is identity.
1714 # function for a changenode is identity.
1708 def identity(x):
1715 def identity(x):
1709 return x
1716 return x
1710
1717
1711 # A function generating function. Sets up an environment for the
1718 # A function generating function. Sets up an environment for the
1712 # inner function.
1719 # inner function.
1713 def cmp_by_rev_func(revlog):
1720 def cmp_by_rev_func(revlog):
1714 # Compare two nodes by their revision number in the environment's
1721 # Compare two nodes by their revision number in the environment's
1715 # revision history. Since the revision number both represents the
1722 # revision history. Since the revision number both represents the
1716 # most efficient order to read the nodes in, and represents a
1723 # most efficient order to read the nodes in, and represents a
1717 # topological sorting of the nodes, this function is often useful.
1724 # topological sorting of the nodes, this function is often useful.
1718 def cmp_by_rev(a, b):
1725 def cmp_by_rev(a, b):
1719 return cmp(revlog.rev(a), revlog.rev(b))
1726 return cmp(revlog.rev(a), revlog.rev(b))
1720 return cmp_by_rev
1727 return cmp_by_rev
1721
1728
1722 # If we determine that a particular file or manifest node must be a
1729 # If we determine that a particular file or manifest node must be a
1723 # node that the recipient of the changegroup will already have, we can
1730 # node that the recipient of the changegroup will already have, we can
1724 # also assume the recipient will have all the parents. This function
1731 # also assume the recipient will have all the parents. This function
1725 # prunes them from the set of missing nodes.
1732 # prunes them from the set of missing nodes.
1726 def prune_parents(revlog, hasset, msngset):
1733 def prune_parents(revlog, hasset, msngset):
1727 haslst = list(hasset)
1734 haslst = list(hasset)
1728 haslst.sort(cmp_by_rev_func(revlog))
1735 haslst.sort(cmp_by_rev_func(revlog))
1729 for node in haslst:
1736 for node in haslst:
1730 parentlst = [p for p in revlog.parents(node) if p != nullid]
1737 parentlst = [p for p in revlog.parents(node) if p != nullid]
1731 while parentlst:
1738 while parentlst:
1732 n = parentlst.pop()
1739 n = parentlst.pop()
1733 if n not in hasset:
1740 if n not in hasset:
1734 hasset.add(n)
1741 hasset.add(n)
1735 p = [p for p in revlog.parents(n) if p != nullid]
1742 p = [p for p in revlog.parents(n) if p != nullid]
1736 parentlst.extend(p)
1743 parentlst.extend(p)
1737 for n in hasset:
1744 for n in hasset:
1738 msngset.pop(n, None)
1745 msngset.pop(n, None)
1739
1746
1740 # This is a function generating function used to set up an environment
1747 # This is a function generating function used to set up an environment
1741 # for the inner function to execute in.
1748 # for the inner function to execute in.
1742 def manifest_and_file_collector(changedfileset):
1749 def manifest_and_file_collector(changedfileset):
1743 # This is an information gathering function that gathers
1750 # This is an information gathering function that gathers
1744 # information from each changeset node that goes out as part of
1751 # information from each changeset node that goes out as part of
1745 # the changegroup. The information gathered is a list of which
1752 # the changegroup. The information gathered is a list of which
1746 # manifest nodes are potentially required (the recipient may
1753 # manifest nodes are potentially required (the recipient may
1747 # already have them) and total list of all files which were
1754 # already have them) and total list of all files which were
1748 # changed in any changeset in the changegroup.
1755 # changed in any changeset in the changegroup.
1749 #
1756 #
1750 # We also remember the first changenode we saw any manifest
1757 # We also remember the first changenode we saw any manifest
1751 # referenced by so we can later determine which changenode 'owns'
1758 # referenced by so we can later determine which changenode 'owns'
1752 # the manifest.
1759 # the manifest.
1753 def collect_manifests_and_files(clnode):
1760 def collect_manifests_and_files(clnode):
1754 c = cl.read(clnode)
1761 c = cl.read(clnode)
1755 for f in c[3]:
1762 for f in c[3]:
1756 # This is to make sure we only have one instance of each
1763 # This is to make sure we only have one instance of each
1757 # filename string for each filename.
1764 # filename string for each filename.
1758 changedfileset.setdefault(f, f)
1765 changedfileset.setdefault(f, f)
1759 msng_mnfst_set.setdefault(c[0], clnode)
1766 msng_mnfst_set.setdefault(c[0], clnode)
1760 return collect_manifests_and_files
1767 return collect_manifests_and_files
1761
1768
1762 # Figure out which manifest nodes (of the ones we think might be part
1769 # Figure out which manifest nodes (of the ones we think might be part
1763 # of the changegroup) the recipient must know about and remove them
1770 # of the changegroup) the recipient must know about and remove them
1764 # from the changegroup.
1771 # from the changegroup.
1765 def prune_manifests():
1772 def prune_manifests():
1766 has_mnfst_set = set()
1773 has_mnfst_set = set()
1767 for n in msng_mnfst_set:
1774 for n in msng_mnfst_set:
1768 # If a 'missing' manifest thinks it belongs to a changenode
1775 # If a 'missing' manifest thinks it belongs to a changenode
1769 # the recipient is assumed to have, obviously the recipient
1776 # the recipient is assumed to have, obviously the recipient
1770 # must have that manifest.
1777 # must have that manifest.
1771 linknode = cl.node(mnfst.linkrev(mnfst.rev(n)))
1778 linknode = cl.node(mnfst.linkrev(mnfst.rev(n)))
1772 if linknode in has_cl_set:
1779 if linknode in has_cl_set:
1773 has_mnfst_set.add(n)
1780 has_mnfst_set.add(n)
1774 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1781 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1775
1782
1776 # Use the information collected in collect_manifests_and_files to say
1783 # Use the information collected in collect_manifests_and_files to say
1777 # which changenode any manifestnode belongs to.
1784 # which changenode any manifestnode belongs to.
1778 def lookup_manifest_link(mnfstnode):
1785 def lookup_manifest_link(mnfstnode):
1779 return msng_mnfst_set[mnfstnode]
1786 return msng_mnfst_set[mnfstnode]
1780
1787
1781 # A function generating function that sets up the initial environment
1788 # A function generating function that sets up the initial environment
1782 # for the inner function.
1789 # for the inner function.
1783 def filenode_collector(changedfiles):
1790 def filenode_collector(changedfiles):
1784 next_rev = [0]
1791 next_rev = [0]
1785 # This gathers information from each manifestnode included in the
1792 # This gathers information from each manifestnode included in the
1786 # changegroup about which filenodes the manifest node references
1793 # changegroup about which filenodes the manifest node references
1787 # so we can include those in the changegroup too.
1794 # so we can include those in the changegroup too.
1788 #
1795 #
1789 # It also remembers which changenode each filenode belongs to. It
1796 # It also remembers which changenode each filenode belongs to. It
1790 # does this by assuming a filenode belongs to the same changenode as
1797 # does this by assuming a filenode belongs to the same changenode as
1791 # the first manifest that references it.
1798 # the first manifest that references it.
1792 def collect_msng_filenodes(mnfstnode):
1799 def collect_msng_filenodes(mnfstnode):
1793 r = mnfst.rev(mnfstnode)
1800 r = mnfst.rev(mnfstnode)
1794 if r == next_rev[0]:
1801 if r == next_rev[0]:
1795 # If the last rev we looked at was the one just previous,
1802 # If the last rev we looked at was the one just previous,
1796 # we only need to see a diff.
1803 # we only need to see a diff.
1797 deltamf = mnfst.readdelta(mnfstnode)
1804 deltamf = mnfst.readdelta(mnfstnode)
1798 # For each line in the delta
1805 # For each line in the delta
1799 for f, fnode in deltamf.iteritems():
1806 for f, fnode in deltamf.iteritems():
1800 f = changedfiles.get(f, None)
1807 f = changedfiles.get(f, None)
1801 # And if the file is in the list of files we care
1808 # And if the file is in the list of files we care
1802 # about.
1809 # about.
1803 if f is not None:
1810 if f is not None:
1804 # Get the changenode this manifest belongs to
1811 # Get the changenode this manifest belongs to
1805 clnode = msng_mnfst_set[mnfstnode]
1812 clnode = msng_mnfst_set[mnfstnode]
1806 # Create the set of filenodes for the file if
1813 # Create the set of filenodes for the file if
1807 # there isn't one already.
1814 # there isn't one already.
1808 ndset = msng_filenode_set.setdefault(f, {})
1815 ndset = msng_filenode_set.setdefault(f, {})
1809 # And set the filenode's changelog node to the
1816 # And set the filenode's changelog node to the
1810 # manifest's if it hasn't been set already.
1817 # manifest's if it hasn't been set already.
1811 ndset.setdefault(fnode, clnode)
1818 ndset.setdefault(fnode, clnode)
1812 else:
1819 else:
1813 # Otherwise we need a full manifest.
1820 # Otherwise we need a full manifest.
1814 m = mnfst.read(mnfstnode)
1821 m = mnfst.read(mnfstnode)
1815 # For every file we care about.
1822 # For every file we care about.
1816 for f in changedfiles:
1823 for f in changedfiles:
1817 fnode = m.get(f, None)
1824 fnode = m.get(f, None)
1818 # If it's in the manifest
1825 # If it's in the manifest
1819 if fnode is not None:
1826 if fnode is not None:
1820 # See comments above.
1827 # See comments above.
1821 clnode = msng_mnfst_set[mnfstnode]
1828 clnode = msng_mnfst_set[mnfstnode]
1822 ndset = msng_filenode_set.setdefault(f, {})
1829 ndset = msng_filenode_set.setdefault(f, {})
1823 ndset.setdefault(fnode, clnode)
1830 ndset.setdefault(fnode, clnode)
1824 # Remember the revision we hope to see next.
1831 # Remember the revision we hope to see next.
1825 next_rev[0] = r + 1
1832 next_rev[0] = r + 1
1826 return collect_msng_filenodes
1833 return collect_msng_filenodes
1827
1834
1828 # We have a list of filenodes we think we need for a file, lets remove
1835 # We have a list of filenodes we think we need for a file, lets remove
1829 # all those we know the recipient must have.
1836 # all those we know the recipient must have.
1830 def prune_filenodes(f, filerevlog):
1837 def prune_filenodes(f, filerevlog):
1831 msngset = msng_filenode_set[f]
1838 msngset = msng_filenode_set[f]
1832 hasset = set()
1839 hasset = set()
1833 # If a 'missing' filenode thinks it belongs to a changenode we
1840 # If a 'missing' filenode thinks it belongs to a changenode we
1834 # assume the recipient must have, then the recipient must have
1841 # assume the recipient must have, then the recipient must have
1835 # that filenode.
1842 # that filenode.
1836 for n in msngset:
1843 for n in msngset:
1837 clnode = cl.node(filerevlog.linkrev(filerevlog.rev(n)))
1844 clnode = cl.node(filerevlog.linkrev(filerevlog.rev(n)))
1838 if clnode in has_cl_set:
1845 if clnode in has_cl_set:
1839 hasset.add(n)
1846 hasset.add(n)
1840 prune_parents(filerevlog, hasset, msngset)
1847 prune_parents(filerevlog, hasset, msngset)
1841
1848
1842 # A function generating function that sets up a context for the
1849 # A function generating function that sets up a context for the
1843 # inner function.
1850 # inner function.
1844 def lookup_filenode_link_func(fname):
1851 def lookup_filenode_link_func(fname):
1845 msngset = msng_filenode_set[fname]
1852 msngset = msng_filenode_set[fname]
1846 # Lookup the changenode the filenode belongs to.
1853 # Lookup the changenode the filenode belongs to.
1847 def lookup_filenode_link(fnode):
1854 def lookup_filenode_link(fnode):
1848 return msngset[fnode]
1855 return msngset[fnode]
1849 return lookup_filenode_link
1856 return lookup_filenode_link
1850
1857
1851 # Add the nodes that were explicitly requested.
1858 # Add the nodes that were explicitly requested.
1852 def add_extra_nodes(name, nodes):
1859 def add_extra_nodes(name, nodes):
1853 if not extranodes or name not in extranodes:
1860 if not extranodes or name not in extranodes:
1854 return
1861 return
1855
1862
1856 for node, linknode in extranodes[name]:
1863 for node, linknode in extranodes[name]:
1857 if node not in nodes:
1864 if node not in nodes:
1858 nodes[node] = linknode
1865 nodes[node] = linknode
1859
1866
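# A small illustration of the shape add_extra_nodes() above expects for
# 'extranodes': keys are either the integer 1 (meaning the manifest) or a
# filename, values are (node, linknode) pairs.  The node strings here are
# placeholders, not real hashes.
example_extranodes = {
    1: [('<manifest node>', '<changelog node>')],
    'foo.txt': [('<file node>', '<changelog node>')],
}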
1860 # Now that we have all these utility functions to help out and
1867 # Now that we have all these utility functions to help out and
1861 # logically divide up the task, generate the group.
1868 # logically divide up the task, generate the group.
1862 def gengroup():
1869 def gengroup():
1863 # The set of changed files starts empty.
1870 # The set of changed files starts empty.
1864 changedfiles = {}
1871 changedfiles = {}
1865 # Create a changenode group generator that will call our functions
1872 # Create a changenode group generator that will call our functions
1866 # back to lookup the owning changenode and collect information.
1873 # back to lookup the owning changenode and collect information.
1867 group = cl.group(msng_cl_lst, identity,
1874 group = cl.group(msng_cl_lst, identity,
1868 manifest_and_file_collector(changedfiles))
1875 manifest_and_file_collector(changedfiles))
1869 for chnk in group:
1876 for chnk in group:
1870 yield chnk
1877 yield chnk
1871
1878
1872 # The list of manifests has been collected by the generator
1879 # The list of manifests has been collected by the generator
1873 # calling our functions back.
1880 # calling our functions back.
1874 prune_manifests()
1881 prune_manifests()
1875 add_extra_nodes(1, msng_mnfst_set)
1882 add_extra_nodes(1, msng_mnfst_set)
1876 msng_mnfst_lst = msng_mnfst_set.keys()
1883 msng_mnfst_lst = msng_mnfst_set.keys()
1877 # Sort the manifestnodes by revision number.
1884 # Sort the manifestnodes by revision number.
1878 msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
1885 msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
1879 # Create a generator for the manifestnodes that calls our lookup
1886 # Create a generator for the manifestnodes that calls our lookup
1880 # and data collection functions back.
1887 # and data collection functions back.
1881 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1888 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1882 filenode_collector(changedfiles))
1889 filenode_collector(changedfiles))
1883 for chnk in group:
1890 for chnk in group:
1884 yield chnk
1891 yield chnk
1885
1892
1886 # These are no longer needed, dereference and toss the memory for
1893 # These are no longer needed, dereference and toss the memory for
1887 # them.
1894 # them.
1888 msng_mnfst_lst = None
1895 msng_mnfst_lst = None
1889 msng_mnfst_set.clear()
1896 msng_mnfst_set.clear()
1890
1897
1891 if extranodes:
1898 if extranodes:
1892 for fname in extranodes:
1899 for fname in extranodes:
1893 if isinstance(fname, int):
1900 if isinstance(fname, int):
1894 continue
1901 continue
1895 msng_filenode_set.setdefault(fname, {})
1902 msng_filenode_set.setdefault(fname, {})
1896 changedfiles[fname] = 1
1903 changedfiles[fname] = 1
1897 # Go through all our files in order sorted by name.
1904 # Go through all our files in order sorted by name.
1898 for fname in sorted(changedfiles):
1905 for fname in sorted(changedfiles):
1899 filerevlog = self.file(fname)
1906 filerevlog = self.file(fname)
1900 if not len(filerevlog):
1907 if not len(filerevlog):
1901 raise util.Abort(_("empty or missing revlog for %s") % fname)
1908 raise util.Abort(_("empty or missing revlog for %s") % fname)
1902 # Toss out the filenodes that the recipient isn't really
1909 # Toss out the filenodes that the recipient isn't really
1903 # missing.
1910 # missing.
1904 if fname in msng_filenode_set:
1911 if fname in msng_filenode_set:
1905 prune_filenodes(fname, filerevlog)
1912 prune_filenodes(fname, filerevlog)
1906 add_extra_nodes(fname, msng_filenode_set[fname])
1913 add_extra_nodes(fname, msng_filenode_set[fname])
1907 msng_filenode_lst = msng_filenode_set[fname].keys()
1914 msng_filenode_lst = msng_filenode_set[fname].keys()
1908 else:
1915 else:
1909 msng_filenode_lst = []
1916 msng_filenode_lst = []
1910 # If any filenodes are left, generate the group for them,
1917 # If any filenodes are left, generate the group for them,
1911 # otherwise don't bother.
1918 # otherwise don't bother.
1912 if len(msng_filenode_lst) > 0:
1919 if len(msng_filenode_lst) > 0:
1913 yield changegroup.chunkheader(len(fname))
1920 yield changegroup.chunkheader(len(fname))
1914 yield fname
1921 yield fname
1915 # Sort the filenodes by their revision #
1922 # Sort the filenodes by their revision #
1916 msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
1923 msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
1917 # Create a group generator and only pass in a changenode
1924 # Create a group generator and only pass in a changenode
1918 # lookup function as we need to collect no information
1925 # lookup function as we need to collect no information
1919 # from filenodes.
1926 # from filenodes.
1920 group = filerevlog.group(msng_filenode_lst,
1927 group = filerevlog.group(msng_filenode_lst,
1921 lookup_filenode_link_func(fname))
1928 lookup_filenode_link_func(fname))
1922 for chnk in group:
1929 for chnk in group:
1923 yield chnk
1930 yield chnk
1924 if fname in msng_filenode_set:
1931 if fname in msng_filenode_set:
1925 # Don't need this anymore, toss it to free memory.
1932 # Don't need this anymore, toss it to free memory.
1926 del msng_filenode_set[fname]
1933 del msng_filenode_set[fname]
1927 # Signal that no more groups are left.
1934 # Signal that no more groups are left.
1928 yield changegroup.closechunk()
1935 yield changegroup.closechunk()
1929
1936
1930 if msng_cl_lst:
1937 if msng_cl_lst:
1931 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1938 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1932
1939
1933 return util.chunkbuffer(gengroup())
1940 return util.chunkbuffer(gengroup())
1934
1941
1935 def changegroup(self, basenodes, source):
1942 def changegroup(self, basenodes, source):
1936 # to avoid a race we use changegroupsubset() (issue1320)
1943 # to avoid a race we use changegroupsubset() (issue1320)
1937 return self.changegroupsubset(basenodes, self.heads(), source)
1944 return self.changegroupsubset(basenodes, self.heads(), source)
1938
1945
1939 def _changegroup(self, common, source):
1946 def _changegroup(self, common, source):
1940 """Generate a changegroup of all nodes that we have that a recipient
1947 """Generate a changegroup of all nodes that we have that a recipient
1941 doesn't.
1948 doesn't.
1942
1949
1943 This is much easier than the previous function as we can assume that
1950 This is much easier than the previous function as we can assume that
1944 the recipient has any changenode we aren't sending them.
1951 the recipient has any changenode we aren't sending them.
1945
1952
1946 common is the set of common nodes between remote and self"""
1953 common is the set of common nodes between remote and self"""
1947
1954
1948 self.hook('preoutgoing', throw=True, source=source)
1955 self.hook('preoutgoing', throw=True, source=source)
1949
1956
1950 cl = self.changelog
1957 cl = self.changelog
1951 nodes = cl.findmissing(common)
1958 nodes = cl.findmissing(common)
1952 revset = set([cl.rev(n) for n in nodes])
1959 revset = set([cl.rev(n) for n in nodes])
1953 self.changegroupinfo(nodes, source)
1960 self.changegroupinfo(nodes, source)
1954
1961
1955 def identity(x):
1962 def identity(x):
1956 return x
1963 return x
1957
1964
1958 def gennodelst(log):
1965 def gennodelst(log):
1959 for r in log:
1966 for r in log:
1960 if log.linkrev(r) in revset:
1967 if log.linkrev(r) in revset:
1961 yield log.node(r)
1968 yield log.node(r)
1962
1969
1963 def changed_file_collector(changedfileset):
1970 def changed_file_collector(changedfileset):
1964 def collect_changed_files(clnode):
1971 def collect_changed_files(clnode):
1965 c = cl.read(clnode)
1972 c = cl.read(clnode)
1966 changedfileset.update(c[3])
1973 changedfileset.update(c[3])
1967 return collect_changed_files
1974 return collect_changed_files
1968
1975
1969 def lookuprevlink_func(revlog):
1976 def lookuprevlink_func(revlog):
1970 def lookuprevlink(n):
1977 def lookuprevlink(n):
1971 return cl.node(revlog.linkrev(revlog.rev(n)))
1978 return cl.node(revlog.linkrev(revlog.rev(n)))
1972 return lookuprevlink
1979 return lookuprevlink
1973
1980
1974 def gengroup():
1981 def gengroup():
1975 # construct a list of all changed files
1982 # construct a list of all changed files
1976 changedfiles = set()
1983 changedfiles = set()
1977
1984
1978 for chnk in cl.group(nodes, identity,
1985 for chnk in cl.group(nodes, identity,
1979 changed_file_collector(changedfiles)):
1986 changed_file_collector(changedfiles)):
1980 yield chnk
1987 yield chnk
1981
1988
1982 mnfst = self.manifest
1989 mnfst = self.manifest
1983 nodeiter = gennodelst(mnfst)
1990 nodeiter = gennodelst(mnfst)
1984 for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
1991 for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
1985 yield chnk
1992 yield chnk
1986
1993
1987 for fname in sorted(changedfiles):
1994 for fname in sorted(changedfiles):
1988 filerevlog = self.file(fname)
1995 filerevlog = self.file(fname)
1989 if not len(filerevlog):
1996 if not len(filerevlog):
1990 raise util.Abort(_("empty or missing revlog for %s") % fname)
1997 raise util.Abort(_("empty or missing revlog for %s") % fname)
1991 nodeiter = gennodelst(filerevlog)
1998 nodeiter = gennodelst(filerevlog)
1992 nodeiter = list(nodeiter)
1999 nodeiter = list(nodeiter)
1993 if nodeiter:
2000 if nodeiter:
1994 yield changegroup.chunkheader(len(fname))
2001 yield changegroup.chunkheader(len(fname))
1995 yield fname
2002 yield fname
1996 lookup = lookuprevlink_func(filerevlog)
2003 lookup = lookuprevlink_func(filerevlog)
1997 for chnk in filerevlog.group(nodeiter, lookup):
2004 for chnk in filerevlog.group(nodeiter, lookup):
1998 yield chnk
2005 yield chnk
1999
2006
2000 yield changegroup.closechunk()
2007 yield changegroup.closechunk()
2001
2008
2002 if nodes:
2009 if nodes:
2003 self.hook('outgoing', node=hex(nodes[0]), source=source)
2010 self.hook('outgoing', node=hex(nodes[0]), source=source)
2004
2011
2005 return util.chunkbuffer(gengroup())
2012 return util.chunkbuffer(gengroup())
2006
2013
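# A minimal sketch (illustration only, not part of this changeset) of how the
# stream produced by gengroup() above is framed, read back with the same
# helpers from the changegroup module imported at the top of this file that
# addchangegroup() below relies on: a changelog group, a manifest group, then
# for each file a name chunk followed by its delta group, terminated by an
# empty chunk.
def changegroup_filenames(cg):
    for chunk in changegroup.chunkiter(cg):
        pass                                  # skip the changelog chunks
    for chunk in changegroup.chunkiter(cg):
        pass                                  # skip the manifest chunks
    names = []
    while True:
        fname = changegroup.getchunk(cg)
        if not fname:                         # empty chunk ends the stream
            break
        names.append(fname)
        for chunk in changegroup.chunkiter(cg):
            pass                              # skip this file's delta chunks
    return names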
2007 def addchangegroup(self, source, srctype, url, emptyok=False):
2014 def addchangegroup(self, source, srctype, url, emptyok=False):
2008 """add changegroup to repo.
2015 """add changegroup to repo.
2009
2016
2010 return values:
2017 return values:
2011 - nothing changed or no source: 0
2018 - nothing changed or no source: 0
2012 - more heads than before: 1+added heads (2..n)
2019 - more heads than before: 1+added heads (2..n)
2013 - fewer heads than before: -1-removed heads (-2..-n)
2020 - fewer heads than before: -1-removed heads (-2..-n)
2014 - number of heads stays the same: 1
2021 - number of heads stays the same: 1
2015 """
2022 """
2016 def csmap(x):
2023 def csmap(x):
2017 self.ui.debug(_("add changeset %s\n") % short(x))
2024 self.ui.debug(_("add changeset %s\n") % short(x))
2018 return len(cl)
2025 return len(cl)
2019
2026
2020 def revmap(x):
2027 def revmap(x):
2021 return cl.rev(x)
2028 return cl.rev(x)
2022
2029
2023 if not source:
2030 if not source:
2024 return 0
2031 return 0
2025
2032
2026 self.hook('prechangegroup', throw=True, source=srctype, url=url)
2033 self.hook('prechangegroup', throw=True, source=srctype, url=url)
2027
2034
2028 changesets = files = revisions = 0
2035 changesets = files = revisions = 0
2029
2036
2030 # write changelog data to temp files so concurrent readers will not see
2037 # write changelog data to temp files so concurrent readers will not see
2031 # inconsistent view
2038 # inconsistent view
2032 cl = self.changelog
2039 cl = self.changelog
2033 cl.delayupdate()
2040 cl.delayupdate()
2034 oldheads = len(cl.heads())
2041 oldheads = len(cl.heads())
2035
2042
2036 tr = self.transaction()
2043 tr = self.transaction()
2037 try:
2044 try:
2038 trp = weakref.proxy(tr)
2045 trp = weakref.proxy(tr)
2039 # pull off the changeset group
2046 # pull off the changeset group
2040 self.ui.status(_("adding changesets\n"))
2047 self.ui.status(_("adding changesets\n"))
2041 clstart = len(cl)
2048 clstart = len(cl)
2042 chunkiter = changegroup.chunkiter(source)
2049 chunkiter = changegroup.chunkiter(source)
2043 if cl.addgroup(chunkiter, csmap, trp) is None and not emptyok:
2050 if cl.addgroup(chunkiter, csmap, trp) is None and not emptyok:
2044 raise util.Abort(_("received changelog group is empty"))
2051 raise util.Abort(_("received changelog group is empty"))
2045 clend = len(cl)
2052 clend = len(cl)
2046 changesets = clend - clstart
2053 changesets = clend - clstart
2047
2054
2048 # pull off the manifest group
2055 # pull off the manifest group
2049 self.ui.status(_("adding manifests\n"))
2056 self.ui.status(_("adding manifests\n"))
2050 chunkiter = changegroup.chunkiter(source)
2057 chunkiter = changegroup.chunkiter(source)
2051 # no need to check for empty manifest group here:
2058 # no need to check for empty manifest group here:
2052 # if the result of the merge of 1 and 2 is the same in 3 and 4,
2059 # if the result of the merge of 1 and 2 is the same in 3 and 4,
2053 # no new manifest will be created and the manifest group will
2060 # no new manifest will be created and the manifest group will
2054 # be empty during the pull
2061 # be empty during the pull
2055 self.manifest.addgroup(chunkiter, revmap, trp)
2062 self.manifest.addgroup(chunkiter, revmap, trp)
2056
2063
2057 # process the files
2064 # process the files
2058 self.ui.status(_("adding file changes\n"))
2065 self.ui.status(_("adding file changes\n"))
2059 while 1:
2066 while 1:
2060 f = changegroup.getchunk(source)
2067 f = changegroup.getchunk(source)
2061 if not f:
2068 if not f:
2062 break
2069 break
2063 self.ui.debug(_("adding %s revisions\n") % f)
2070 self.ui.debug(_("adding %s revisions\n") % f)
2064 fl = self.file(f)
2071 fl = self.file(f)
2065 o = len(fl)
2072 o = len(fl)
2066 chunkiter = changegroup.chunkiter(source)
2073 chunkiter = changegroup.chunkiter(source)
2067 if fl.addgroup(chunkiter, revmap, trp) is None:
2074 if fl.addgroup(chunkiter, revmap, trp) is None:
2068 raise util.Abort(_("received file revlog group is empty"))
2075 raise util.Abort(_("received file revlog group is empty"))
2069 revisions += len(fl) - o
2076 revisions += len(fl) - o
2070 files += 1
2077 files += 1
2071
2078
2072 newheads = len(cl.heads())
2079 newheads = len(cl.heads())
2073 heads = ""
2080 heads = ""
2074 if oldheads and newheads != oldheads:
2081 if oldheads and newheads != oldheads:
2075 heads = _(" (%+d heads)") % (newheads - oldheads)
2082 heads = _(" (%+d heads)") % (newheads - oldheads)
2076
2083
2077 self.ui.status(_("added %d changesets"
2084 self.ui.status(_("added %d changesets"
2078 " with %d changes to %d files%s\n")
2085 " with %d changes to %d files%s\n")
2079 % (changesets, revisions, files, heads))
2086 % (changesets, revisions, files, heads))
2080
2087
2081 if changesets > 0:
2088 if changesets > 0:
2082 p = lambda: cl.writepending() and self.root or ""
2089 p = lambda: cl.writepending() and self.root or ""
2083 self.hook('pretxnchangegroup', throw=True,
2090 self.hook('pretxnchangegroup', throw=True,
2084 node=hex(cl.node(clstart)), source=srctype,
2091 node=hex(cl.node(clstart)), source=srctype,
2085 url=url, pending=p)
2092 url=url, pending=p)
2086
2093
2087 # make changelog see real files again
2094 # make changelog see real files again
2088 cl.finalize(trp)
2095 cl.finalize(trp)
2089
2096
2090 tr.close()
2097 tr.close()
2091 finally:
2098 finally:
2092 del tr
2099 del tr
2093
2100
2094 if changesets > 0:
2101 if changesets > 0:
2095 # forcefully update the on-disk branch cache
2102 # forcefully update the on-disk branch cache
2096 self.ui.debug(_("updating the branch cache\n"))
2103 self.ui.debug(_("updating the branch cache\n"))
2097 self.branchtags()
2104 self.branchtags()
2098 self.hook("changegroup", node=hex(cl.node(clstart)),
2105 self.hook("changegroup", node=hex(cl.node(clstart)),
2099 source=srctype, url=url)
2106 source=srctype, url=url)
2100
2107
2101 for i in xrange(clstart, clend):
2108 for i in xrange(clstart, clend):
2102 self.hook("incoming", node=hex(cl.node(i)),
2109 self.hook("incoming", node=hex(cl.node(i)),
2103 source=srctype, url=url)
2110 source=srctype, url=url)
2104
2111
2105 # never return 0 here:
2112 # never return 0 here:
2106 if newheads < oldheads:
2113 if newheads < oldheads:
2107 return newheads - oldheads - 1
2114 return newheads - oldheads - 1
2108 else:
2115 else:
2109 return newheads - oldheads + 1
2116 return newheads - oldheads + 1
2110
2117
2111
2118
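# A tiny sketch (the helper name is made up for illustration) decoding the
# return value documented in addchangegroup() above back into a head-count
# delta, for a caller that only needs that number.
def headcount_change(ret):
    if ret == 0:
        return None        # nothing changed or no source
    if ret > 0:
        return ret - 1     # 1 means unchanged, 2..n means n-1 heads added
    return ret + 1         # -2..-n means heads removed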
2112 def stream_in(self, remote):
2119 def stream_in(self, remote):
2113 fp = remote.stream_out()
2120 fp = remote.stream_out()
2114 l = fp.readline()
2121 l = fp.readline()
2115 try:
2122 try:
2116 resp = int(l)
2123 resp = int(l)
2117 except ValueError:
2124 except ValueError:
2118 raise error.ResponseError(
2125 raise error.ResponseError(
2119 _('Unexpected response from remote server:'), l)
2126 _('Unexpected response from remote server:'), l)
2120 if resp == 1:
2127 if resp == 1:
2121 raise util.Abort(_('operation forbidden by server'))
2128 raise util.Abort(_('operation forbidden by server'))
2122 elif resp == 2:
2129 elif resp == 2:
2123 raise util.Abort(_('locking the remote repository failed'))
2130 raise util.Abort(_('locking the remote repository failed'))
2124 elif resp != 0:
2131 elif resp != 0:
2125 raise util.Abort(_('the server sent an unknown error code'))
2132 raise util.Abort(_('the server sent an unknown error code'))
2126 self.ui.status(_('streaming all changes\n'))
2133 self.ui.status(_('streaming all changes\n'))
2127 l = fp.readline()
2134 l = fp.readline()
2128 try:
2135 try:
2129 total_files, total_bytes = map(int, l.split(' ', 1))
2136 total_files, total_bytes = map(int, l.split(' ', 1))
2130 except (ValueError, TypeError):
2137 except (ValueError, TypeError):
2131 raise error.ResponseError(
2138 raise error.ResponseError(
2132 _('Unexpected response from remote server:'), l)
2139 _('Unexpected response from remote server:'), l)
2133 self.ui.status(_('%d files to transfer, %s of data\n') %
2140 self.ui.status(_('%d files to transfer, %s of data\n') %
2134 (total_files, util.bytecount(total_bytes)))
2141 (total_files, util.bytecount(total_bytes)))
2135 start = time.time()
2142 start = time.time()
2136 for i in xrange(total_files):
2143 for i in xrange(total_files):
2137 # XXX doesn't support '\n' or '\r' in filenames
2144 # XXX doesn't support '\n' or '\r' in filenames
2138 l = fp.readline()
2145 l = fp.readline()
2139 try:
2146 try:
2140 name, size = l.split('\0', 1)
2147 name, size = l.split('\0', 1)
2141 size = int(size)
2148 size = int(size)
2142 except (ValueError, TypeError):
2149 except (ValueError, TypeError):
2143 raise error.ResponseError(
2150 raise error.ResponseError(
2144 _('Unexpected response from remote server:'), l)
2151 _('Unexpected response from remote server:'), l)
2145 self.ui.debug(_('adding %s (%s)\n') % (name, util.bytecount(size)))
2152 self.ui.debug(_('adding %s (%s)\n') % (name, util.bytecount(size)))
2146 # for backwards compat, name was partially encoded
2153 # for backwards compat, name was partially encoded
2147 ofp = self.sopener(store.decodedir(name), 'w')
2154 ofp = self.sopener(store.decodedir(name), 'w')
2148 for chunk in util.filechunkiter(fp, limit=size):
2155 for chunk in util.filechunkiter(fp, limit=size):
2149 ofp.write(chunk)
2156 ofp.write(chunk)
2150 ofp.close()
2157 ofp.close()
2151 elapsed = time.time() - start
2158 elapsed = time.time() - start
2152 if elapsed <= 0:
2159 if elapsed <= 0:
2153 elapsed = 0.001
2160 elapsed = 0.001
2154 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
2161 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
2155 (util.bytecount(total_bytes), elapsed,
2162 (util.bytecount(total_bytes), elapsed,
2156 util.bytecount(total_bytes / elapsed)))
2163 util.bytecount(total_bytes / elapsed)))
2157 self.invalidate()
2164 self.invalidate()
2158 return len(self.heads()) + 1
2165 return len(self.heads()) + 1
2159
2166
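# A minimal sketch of the per-file framing that stream_in() above consumes
# (illustration only: it skips the response-code and progress handling, and
# assumes fp.read(n) returns full blocks, which is why the real code uses
# util.filechunkiter instead).  Each entry is "<name>\0<size>\n" followed by
# <size> raw bytes of revlog data.
def iter_streamed_files(fp):
    total_files, total_bytes = map(int, fp.readline().split(' ', 1))
    for i in xrange(total_files):
        name, size = fp.readline().split('\0', 1)
        size = int(size)
        yield name, fp.read(size)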
2160 def clone(self, remote, heads=[], stream=False):
2167 def clone(self, remote, heads=[], stream=False):
2161 '''clone remote repository.
2168 '''clone remote repository.
2162
2169
2163 keyword arguments:
2170 keyword arguments:
2164 heads: list of revs to clone (forces use of pull)
2171 heads: list of revs to clone (forces use of pull)
2165 stream: use streaming clone if possible'''
2172 stream: use streaming clone if possible'''
2166
2173
2167 # now, all clients that can request uncompressed clones can
2174 # now, all clients that can request uncompressed clones can
2168 # read repo formats supported by all servers that can serve
2175 # read repo formats supported by all servers that can serve
2169 # them.
2176 # them.
2170
2177
2171 # if revlog format changes, client will have to check version
2178 # if revlog format changes, client will have to check version
2172 # and format flags on "stream" capability, and use
2179 # and format flags on "stream" capability, and use
2173 # uncompressed only if compatible.
2180 # uncompressed only if compatible.
2174
2181
2175 if stream and not heads and remote.capable('stream'):
2182 if stream and not heads and remote.capable('stream'):
2176 return self.stream_in(remote)
2183 return self.stream_in(remote)
2177 return self.pull(remote, heads)
2184 return self.pull(remote, heads)
2178
2185
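# Hedged usage sketch (the URL and helper name are illustrative, not from
# this file): a streaming clone is only attempted when no heads are requested
# and the server advertises the 'stream' capability; otherwise clone() falls
# back to a regular pull.
def clone_example(repo, url='http://example.com/repo'):
    from mercurial import hg
    other = hg.repository(repo.ui, url)    # hypothetical remote peer
    return repo.clone(other, stream=True)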
2179 # used to avoid circular references so destructors work
2186 # used to avoid circular references so destructors work
2180 def aftertrans(files):
2187 def aftertrans(files):
2181 renamefiles = [tuple(t) for t in files]
2188 renamefiles = [tuple(t) for t in files]
2182 def a():
2189 def a():
2183 for src, dest in renamefiles:
2190 for src, dest in renamefiles:
2184 util.rename(src, dest)
2191 util.rename(src, dest)
2185 return a
2192 return a
2186
2193
2187 def instance(ui, path, create):
2194 def instance(ui, path, create):
2188 return localrepository(ui, util.drop_scheme('file', path), create)
2195 return localrepository(ui, util.drop_scheme('file', path), create)
2189
2196
2190 def islocal(path):
2197 def islocal(path):
2191 return True
2198 return True
@@ -1,154 +1,154 b''
1 0: Adding root node
1 0: Adding root node
2 -------
2 -------
3 0: Adding root node
3 0: Adding root node
4 =======
4 =======
5 marked working directory as branch a
5 marked working directory as branch a
6 1: Adding a branch
6 1: Adding a branch
7 -------
7 -------
8 1: Adding a branch
8 1: Adding a branch
9 =======
9 =======
10 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
10 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
11 marked working directory as branch b
11 marked working directory as branch b
12 created new head
12 created new head
13 2: Adding b branch
13 2: Adding b branch
14 1: Adding a branch
14 1: Adding a branch
15 -------
15 -------
16 2: Adding b branch
16 2: Adding b branch
17 =======
17 =======
18 3: Adding b branch head 1
18 3: Adding b branch head 1
19 1: Adding a branch
19 1: Adding a branch
20 -------
20 -------
21 3: Adding b branch head 1
21 3: Adding b branch head 1
22 =======
22 =======
23 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
23 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
24 created new head
24 created new head
25 4: Adding b branch head 2
25 4: Adding b branch head 2
26 3: Adding b branch head 1
26 3: Adding b branch head 1
27 1: Adding a branch
27 1: Adding a branch
28 -------
28 -------
29 4: Adding b branch head 2
29 4: Adding b branch head 2
30 3: Adding b branch head 1
30 3: Adding b branch head 1
31 =======
31 =======
32 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
32 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
33 created new head
33 created new head
34 5: Adding b branch head 3
34 5: Adding b branch head 3
35 4: Adding b branch head 2
35 4: Adding b branch head 2
36 3: Adding b branch head 1
36 3: Adding b branch head 1
37 1: Adding a branch
37 1: Adding a branch
38 -------
38 -------
39 5: Adding b branch head 3
39 5: Adding b branch head 3
40 4: Adding b branch head 2
40 4: Adding b branch head 2
41 3: Adding b branch head 1
41 3: Adding b branch head 1
42 =======
42 =======
43 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
43 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
44 (branch merge, don't forget to commit)
44 (branch merge, don't forget to commit)
45 6: Merging b branch head 2 and b branch head 3
45 6: Merging b branch head 2 and b branch head 3
46 3: Adding b branch head 1
46 3: Adding b branch head 1
47 1: Adding a branch
47 1: Adding a branch
48 -------
48 -------
49 6: Merging b branch head 2 and b branch head 3
49 6: Merging b branch head 2 and b branch head 3
50 3: Adding b branch head 1
50 3: Adding b branch head 1
51 =======
51 =======
52 marked working directory as branch c
52 marked working directory as branch c
53 7: Adding c branch
53 7: Adding c branch
54 3: Adding b branch head 1
54 3: Adding b branch head 1
55 1: Adding a branch
55 1: Adding a branch
56 -------
56 -------
57 7: Adding c branch
57 7: Adding c branch
58 =======
58 =======
59 no changes on branch c containing . are reachable from 3
59 no changes on branch c containing . are reachable from 3
60 1
60 1
61 -------
61 -------
62 7: Adding c branch
62 7: Adding c branch
63 0
63 0
64 -------
64 -------
65 0 files updated, 0 files merged, 2 files removed, 0 files unresolved
65 0 files updated, 0 files merged, 2 files removed, 0 files unresolved
66 0
66 0
67 -------
67 -------
68 3: Adding b branch head 1
68 3: Adding b branch head 1
69 0
69 0
70 -------
70 -------
71 6: Merging b branch head 2 and b branch head 3
71 3: Adding b branch head 1
72 3: Adding b branch head 1
72 6: Merging b branch head 2 and b branch head 3
73 0
73 0
74 -------
74 -------
75 no changes on branch b containing . are reachable from 7
75 no changes on branch b containing . are reachable from 7
76 1
76 1
77 =======
77 =======
78 0 files updated, 0 files merged, 2 files removed, 0 files unresolved
78 0 files updated, 0 files merged, 2 files removed, 0 files unresolved
79 7: Adding c branch
79 7: Adding c branch
80 3: Adding b branch head 1
80 3: Adding b branch head 1
81 1: Adding a branch
81 1: Adding a branch
82 -------
82 -------
83 0: Adding root node
83 0: Adding root node
84 -------
84 -------
85 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
85 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
86 7: Adding c branch
86 7: Adding c branch
87 3: Adding b branch head 1
87 3: Adding b branch head 1
88 1: Adding a branch
88 1: Adding a branch
89 -------
89 -------
90 1: Adding a branch
90 1: Adding a branch
91 -------
91 -------
92 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
92 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
93 7: Adding c branch
93 7: Adding c branch
94 3: Adding b branch head 1
94 3: Adding b branch head 1
95 1: Adding a branch
95 1: Adding a branch
96 -------
96 -------
97 6: Merging b branch head 2 and b branch head 3
97 6: Merging b branch head 2 and b branch head 3
98 3: Adding b branch head 1
98 3: Adding b branch head 1
99 -------
99 -------
100 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
100 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
101 7: Adding c branch
101 7: Adding c branch
102 3: Adding b branch head 1
102 3: Adding b branch head 1
103 1: Adding a branch
103 1: Adding a branch
104 -------
104 -------
105 6: Merging b branch head 2 and b branch head 3
105 6: Merging b branch head 2 and b branch head 3
106 3: Adding b branch head 1
106 3: Adding b branch head 1
107 -------
107 -------
108 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
108 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
109 7: Adding c branch
109 7: Adding c branch
110 3: Adding b branch head 1
110 3: Adding b branch head 1
111 1: Adding a branch
111 1: Adding a branch
112 -------
112 -------
113 6: Merging b branch head 2 and b branch head 3
113 6: Merging b branch head 2 and b branch head 3
114 3: Adding b branch head 1
114 3: Adding b branch head 1
115 -------
115 -------
116 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
116 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
117 7: Adding c branch
117 7: Adding c branch
118 3: Adding b branch head 1
118 3: Adding b branch head 1
119 1: Adding a branch
119 1: Adding a branch
120 -------
120 -------
121 6: Merging b branch head 2 and b branch head 3
121 6: Merging b branch head 2 and b branch head 3
122 3: Adding b branch head 1
122 3: Adding b branch head 1
123 -------
123 -------
124 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
124 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
125 7: Adding c branch
125 7: Adding c branch
126 3: Adding b branch head 1
126 3: Adding b branch head 1
127 1: Adding a branch
127 1: Adding a branch
128 -------
128 -------
129 6: Merging b branch head 2 and b branch head 3
129 6: Merging b branch head 2 and b branch head 3
130 3: Adding b branch head 1
130 3: Adding b branch head 1
131 -------
131 -------
132 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
132 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
133 7: Adding c branch
133 7: Adding c branch
134 3: Adding b branch head 1
134 3: Adding b branch head 1
135 1: Adding a branch
135 1: Adding a branch
136 -------
136 -------
137 7: Adding c branch
137 7: Adding c branch
138 -------
138 -------
139 =======
139 =======
140 1: Adding a branch
140 1: Adding a branch
141 -------
141 -------
142 6: Merging b branch head 2 and b branch head 3
142 6: Merging b branch head 2 and b branch head 3
143 3: Adding b branch head 1
143 3: Adding b branch head 1
144 -------
144 -------
145 7: Adding c branch
145 7: Adding c branch
146 -------
146 -------
147 abort: unknown revision 'z'!
147 abort: unknown revision 'z'!
148 -------
148 -------
149 =======
149 =======
150 0: Adding root node
150 0: Adding root node
151 1: Adding a branch
151 1: Adding a branch
152 6: Merging b branch head 2 and b branch head 3
152 6: Merging b branch head 2 and b branch head 3
153 3: Adding b branch head 1
153 3: Adding b branch head 1
154 7: Adding c branch
154 7: Adding c branch