push: use the fast changegroup() path on push...
Benoit Boissinot
r7460:3342e6ad default
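The change: when a push sends every outgoing changeset (no subset of heads requested), the generic changegroupsubset() walk is unnecessary and the cheaper changegroup() path can be used. The patched hunk lies past the portion of the file reproduced below; what follows is a minimal sketch of the idea with hypothetical argument names, not the literal patch:

    def outgoing_changegroup(repo, update, revs):
        # 'update' = bases of the missing set, 'revs' = requested heads;
        # revs is None when the caller wants everything outgoing pushed.
        if revs is None:
            # fast path: no pruning needed, changegroup() can serve it
            return repo.changegroup(update, 'push')
        return repo.changegroupsubset(update, revs, 'push')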
@@ -1,2150 +1,2151
# localrepo.py - read/write repository class for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms
# of the GNU General Public License, incorporated herein by reference.

from node import bin, hex, nullid, nullrev, short
from i18n import _
import repo, changegroup
import changelog, dirstate, filelog, manifest, context, weakref
import lock, transaction, stat, errno, ui, store
import os, revlog, time, util, extensions, hook, inspect
import match as match_
import merge as merge_

class localrepository(repo.repository):
    capabilities = util.set(('lookup', 'changegroupsubset'))
    supported = ('revlogv1', 'store', 'fncache')

    def __init__(self, parentui, path=None, create=0):
        repo.repository.__init__(self)
        self.root = os.path.realpath(path)
        self.path = os.path.join(self.root, ".hg")
        self.origroot = path
        self.opener = util.opener(self.path)
        self.wopener = util.opener(self.root)

        if not os.path.isdir(self.path):
            if create:
                if not os.path.exists(path):
                    os.mkdir(path)
                os.mkdir(self.path)
                requirements = ["revlogv1"]
                if parentui.configbool('format', 'usestore', True):
                    os.mkdir(os.path.join(self.path, "store"))
                    requirements.append("store")
                    if parentui.configbool('format', 'usefncache', True):
                        requirements.append("fncache")
                    # create an invalid changelog
                    self.opener("00changelog.i", "a").write(
                        '\0\0\0\2' # represents revlogv2
                        ' dummy changelog to prevent using the old repo layout'
                    )
                reqfile = self.opener("requires", "w")
                for r in requirements:
                    reqfile.write("%s\n" % r)
                reqfile.close()
            else:
                raise repo.RepoError(_("repository %s not found") % path)
        elif create:
            raise repo.RepoError(_("repository %s already exists") % path)
        else:
            # find requirements
            requirements = []
            try:
                requirements = self.opener("requires").read().splitlines()
                for r in requirements:
                    if r not in self.supported:
                        raise repo.RepoError(_("requirement '%s' not supported") % r)
            except IOError, inst:
                if inst.errno != errno.ENOENT:
                    raise

        self.store = store.store(requirements, self.path, util.opener)
        self.spath = self.store.path
        self.sopener = self.store.opener
        self.sjoin = self.store.join
        self.opener.createmode = self.store.createmode

        self.ui = ui.ui(parentui=parentui)
        try:
            self.ui.readconfig(self.join("hgrc"), self.root)
            extensions.loadall(self.ui)
        except IOError:
            pass

        self.tagscache = None
        self._tagstypecache = None
        self.branchcache = None
        self._ubranchcache = None # UTF-8 version of branchcache
        self._branchcachetip = None
        self.nodetagscache = None
        self.filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

    def __getattr__(self, name):
        if name == 'changelog':
            self.changelog = changelog.changelog(self.sopener)
            self.sopener.defversion = self.changelog.version
            return self.changelog
        if name == 'manifest':
            self.changelog
            self.manifest = manifest.manifest(self.sopener)
            return self.manifest
        if name == 'dirstate':
            self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
            return self.dirstate
        else:
            raise AttributeError(name)

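    # Illustrative note (not part of the original file): __getattr__ above
    # implements lazy attributes -- Python only calls it when normal lookup
    # fails, so assigning self.changelog on first access caches the object
    # and bypasses __getattr__ from then on.  A standalone sketch of the
    # same pattern:
    #
    #   class Lazy(object):
    #       def __getattr__(self, name):
    #           if name == 'data':
    #               self.data = load_expensive_data()  # hypothetical loader
    #               return self.data
    #           raise AttributeError(name)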
    def __getitem__(self, changeid):
        if changeid == None:
            return context.workingctx(self)
        return context.changectx(self, changeid)

    def __nonzero__(self):
        return True

    def __len__(self):
        return len(self.changelog)

    def __iter__(self):
        for i in xrange(len(self)):
            yield i

    def url(self):
        return 'file:' + self.root

    def hook(self, name, throw=False, **args):
        return hook.hook(self.ui, self, name, throw, **args)

    tag_disallowed = ':\r\n'

    def _tag(self, names, node, message, local, user, date, parent=None,
             extra={}):
        use_dirstate = parent is None

        if isinstance(names, str):
            allchars = names
            names = (names,)
        else:
            allchars = ''.join(names)
        for c in self.tag_disallowed:
            if c in allchars:
                raise util.Abort(_('%r cannot be used in a tag name') % c)

        for name in names:
            self.hook('pretag', throw=True, node=hex(node), tag=name,
                      local=local)

        def writetags(fp, names, munge, prevtags):
            fp.seek(0, 2)
            if prevtags and prevtags[-1] != '\n':
                fp.write('\n')
            for name in names:
                m = munge and munge(name) or name
                if self._tagstypecache and name in self._tagstypecache:
                    old = self.tagscache.get(name, nullid)
                    fp.write('%s %s\n' % (hex(old), m))
                fp.write('%s %s\n' % (hex(node), m))
            fp.close()

        prevtags = ''
        if local:
            try:
                fp = self.opener('localtags', 'r+')
            except IOError, err:
                fp = self.opener('localtags', 'a')
            else:
                prevtags = fp.read()

            # local tags are stored in the current charset
            writetags(fp, names, None, prevtags)
            for name in names:
                self.hook('tag', node=hex(node), tag=name, local=local)
            return

        if use_dirstate:
            try:
                fp = self.wfile('.hgtags', 'rb+')
            except IOError, err:
                fp = self.wfile('.hgtags', 'ab')
            else:
                prevtags = fp.read()
        else:
            try:
                prevtags = self.filectx('.hgtags', parent).data()
            except revlog.LookupError:
                pass
            fp = self.wfile('.hgtags', 'wb')
            if prevtags:
                fp.write(prevtags)

        # committed tags are stored in UTF-8
        writetags(fp, names, util.fromlocal, prevtags)

        if use_dirstate and '.hgtags' not in self.dirstate:
            self.add(['.hgtags'])

        tagnode = self.commit(['.hgtags'], message, user, date, p1=parent,
                              extra=extra)

        for name in names:
            self.hook('tag', node=hex(node), tag=name, local=local)

        return tagnode

    def tag(self, names, node, message, local, user, date):
        '''tag a revision with one or more symbolic names.

        names is a list of strings or, when adding a single tag, names may be a
        string.

        if local is True, the tags are stored in a per-repository file.
        otherwise, they are stored in the .hgtags file, and a new
        changeset is committed with the change.

        keyword arguments:

        local: whether to store tags in non-version-controlled file
        (default False)

        message: commit message to use if committing

        user: name of user to use if committing

        date: date tuple to use if committing'''

        for x in self.status()[:5]:
            if '.hgtags' in x:
                raise util.Abort(_('working copy of .hgtags is changed '
                                   '(please commit .hgtags manually)'))

        self._tag(names, node, message, local, user, date)

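    # Illustrative call (not from the original file), mirroring what the
    # 'hg tag' command does for a global tag:
    #
    #   repo.tag(['v1.0'], repo.changelog.tip(),
    #            'Added tag v1.0', False, 'user@example.com', None)
    #
    # With local=True the entry goes to .hg/localtags and no changeset is
    # committed.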
    def tags(self):
        '''return a mapping of tag to node'''
        if self.tagscache:
            return self.tagscache

        globaltags = {}
        tagtypes = {}

        def readtags(lines, fn, tagtype):
            filetags = {}
            count = 0

            def warn(msg):
                self.ui.warn(_("%s, line %s: %s\n") % (fn, count, msg))

            for l in lines:
                count += 1
                if not l:
                    continue
                s = l.split(" ", 1)
                if len(s) != 2:
                    warn(_("cannot parse entry"))
                    continue
                node, key = s
                key = util.tolocal(key.strip()) # stored in UTF-8
                try:
                    bin_n = bin(node)
                except TypeError:
                    warn(_("node '%s' is not well formed") % node)
                    continue
                if bin_n not in self.changelog.nodemap:
                    warn(_("tag '%s' refers to unknown node") % key)
                    continue

                h = []
                if key in filetags:
                    n, h = filetags[key]
                    h.append(n)
                filetags[key] = (bin_n, h)

            for k, nh in filetags.items():
                if k not in globaltags:
                    globaltags[k] = nh
                    tagtypes[k] = tagtype
                    continue

                # we prefer the global tag if:
                #  it supercedes us OR
                #  mutual supercedes and it has a higher rank
                # otherwise we win because we're tip-most
                an, ah = nh
                bn, bh = globaltags[k]
                if (bn != an and an in bh and
                    (bn not in ah or len(bh) > len(ah))):
                    an = bn
                ah.extend([n for n in bh if n not in ah])
                globaltags[k] = an, ah
                tagtypes[k] = tagtype

        # read the tags file from each head, ending with the tip
        f = None
        for rev, node, fnode in self._hgtagsnodes():
            f = (f and f.filectx(fnode) or
                 self.filectx('.hgtags', fileid=fnode))
            readtags(f.data().splitlines(), f, "global")

        try:
            data = util.fromlocal(self.opener("localtags").read())
            # localtags are stored in the local character set
            # while the internal tag table is stored in UTF-8
            readtags(data.splitlines(), "localtags", "local")
        except IOError:
            pass

        self.tagscache = {}
        self._tagstypecache = {}
        for k,nh in globaltags.items():
            n = nh[0]
            if n != nullid:
                self.tagscache[k] = n
            self._tagstypecache[k] = tagtypes[k]
        self.tagscache['tip'] = self.changelog.tip()
        return self.tagscache

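    # On-disk tag format, as parsed by readtags() above: one entry per
    # line, a hex changeset node, a space, then the tag name, e.g.
    #
    #   0123456789abcdef0123456789abcdef01234567 v1.0
    #
    # (.hgtags entries are stored in UTF-8; localtags uses the local
    # charset.)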
    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        self.tags()

        return self._tagstypecache.get(tagname)

    def _hgtagsnodes(self):
        heads = self.heads()
        heads.reverse()
        last = {}
        ret = []
        for node in heads:
            c = self[node]
            rev = c.rev()
            try:
                fnode = c.filenode('.hgtags')
            except revlog.LookupError:
                continue
            ret.append((rev, node, fnode))
            if fnode in last:
                ret[last[fnode]] = None
            last[fnode] = len(ret) - 1
        return [item for item in ret if item]

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        l = []
        for t, n in self.tags().items():
            try:
                r = self.changelog.rev(n)
            except:
                r = -2 # sort to the beginning of the list if unknown
            l.append((r, t, n))
        return [(t, n) for r, t, n in util.sort(l)]

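    # tagslist() thus yields (name, node) pairs ordered by the revision
    # each tag points to: tags on unknown nodes first (rev -2), and 'tip'
    # last, since it always refers to the newest revision.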
    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self.nodetagscache:
            self.nodetagscache = {}
            for t, n in self.tags().items():
                self.nodetagscache.setdefault(n, []).append(t)
        return self.nodetagscache.get(node, [])

    def _branchtags(self, partial, lrev):
        tiprev = len(self) - 1
        if lrev != tiprev:
            self._updatebranchcache(partial, lrev+1, tiprev+1)
            self._writebranchcache(partial, self.changelog.tip(), tiprev)

        return partial

    def branchtags(self):
        tip = self.changelog.tip()
        if self.branchcache is not None and self._branchcachetip == tip:
            return self.branchcache

        oldtip = self._branchcachetip
        self._branchcachetip = tip
        if self.branchcache is None:
            self.branchcache = {} # avoid recursion in changectx
        else:
            self.branchcache.clear() # keep using the same dict
        if oldtip is None or oldtip not in self.changelog.nodemap:
            partial, last, lrev = self._readbranchcache()
        else:
            lrev = self.changelog.rev(oldtip)
            partial = self._ubranchcache

        self._branchtags(partial, lrev)

        # the branch cache is stored on disk as UTF-8, but in the local
        # charset internally
        for k, v in partial.items():
            self.branchcache[util.tolocal(k)] = v
        self._ubranchcache = partial
        return self.branchcache

    def _readbranchcache(self):
        partial = {}
        try:
            f = self.opener("branch.cache")
            lines = f.read().split('\n')
            f.close()
        except (IOError, OSError):
            return {}, nullid, nullrev

        try:
            last, lrev = lines.pop(0).split(" ", 1)
            last, lrev = bin(last), int(lrev)
            if lrev >= len(self) or self[lrev].node() != last:
                # invalidate the cache
                raise ValueError('invalidating branch cache (tip differs)')
            for l in lines:
                if not l: continue
                node, label = l.split(" ", 1)
                partial[label.strip()] = bin(node)
        except (KeyboardInterrupt, util.SignalInterrupt):
            raise
        except Exception, inst:
            if self.ui.debugflag:
                self.ui.warn(str(inst), '\n')
            partial, last, lrev = {}, nullid, nullrev
        return partial, last, lrev

    def _writebranchcache(self, branches, tip, tiprev):
        try:
            f = self.opener("branch.cache", "w", atomictemp=True)
            f.write("%s %s\n" % (hex(tip), tiprev))
            for label, node in branches.iteritems():
                f.write("%s %s\n" % (hex(node), label))
            f.rename()
        except (IOError, OSError):
            pass

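    # branch.cache layout, as written above and parsed by
    # _readbranchcache():
    #
    #   <tip-node-hex> <tip-rev>        first line, used as validity check
    #   <head-node-hex> <branch name>   one line per named branch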
    def _updatebranchcache(self, partial, start, end):
        for r in xrange(start, end):
            c = self[r]
            b = c.branch()
            partial[b] = c.node()

    def lookup(self, key):
        if isinstance(key, int):
            return self.changelog.node(key)
        elif key == '.':
            return self.dirstate.parents()[0]
        elif key == 'null':
            return nullid
        elif key == 'tip':
            return self.changelog.tip()
        n = self.changelog._match(key)
        if n:
            return n
        if key in self.tags():
            return self.tags()[key]
        if key in self.branchtags():
            return self.branchtags()[key]
        n = self.changelog._partialmatch(key)
        if n:
            return n
        try:
            if len(key) == 20:
                key = hex(key)
        except:
            pass
        raise repo.RepoError(_("unknown revision '%s'") % key)

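    # lookup() resolution order, per the code above: integer revision,
    # '.' (first working-directory parent), 'null', 'tip', exact node or
    # rev string match, tag name, branch name, then unambiguous node-hex
    # prefix; anything else raises RepoError.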
    def local(self):
        return True

    def join(self, f):
        return os.path.join(self.path, f)

    def wjoin(self, f):
        return os.path.join(self.root, f)

    def rjoin(self, f):
        return os.path.join(self.root, util.pconvert(f))

    def file(self, f):
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.sopener, f)

    def changectx(self, changeid):
        return self[changeid]

    def parents(self, changeid=None):
        '''get list of changectxs for parents of changeid'''
        return self[changeid].parents()

    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
           fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def wfile(self, f, mode='r'):
        return self.wopener(f, mode)

    def _link(self, f):
        return os.path.islink(self.wjoin(f))

    def _filter(self, filter, filename, data):
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    continue
                mf = util.matcher(self.root, "", [pat], [], [])[1]
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: util.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not inspect.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self.filterpats[filter] = l

        for mf, fn, cmd in self.filterpats[filter]:
            if mf(filename):
                self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

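    # The filter tables read above come from hgrc sections named after the
    # 'filter' argument ("encode" on read, "decode" on write).  Illustrative
    # patterns and commands (not from the original file):
    #
    #   [encode]
    #   *.txt = tr -d '\r'
    #   [decode]
    #   *.txt = sed 's/$/\r/'
    #
    # A command of '!' disables a pattern; a command starting with a name
    # registered via adddatafilter() below dispatches to that Python filter
    # instead of a shell pipe.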
    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

    def wread(self, filename):
        if self._link(filename):
            data = os.readlink(self.wjoin(filename))
        else:
            data = self.wopener(filename, 'r').read()
        return self._filter("encode", filename, data)

    def wwrite(self, filename, data, flags):
        data = self._filter("decode", filename, data)
        try:
            os.unlink(self.wjoin(filename))
        except OSError:
            pass
        if 'l' in flags:
            self.wopener.symlink(data, filename)
        else:
            self.wopener(filename, 'w').write(data)
            if 'x' in flags:
                util.set_flags(self.wjoin(filename), False, True)

    def wwritedata(self, filename, data):
        return self._filter("decode", filename, data)

    def transaction(self):
        if self._transref and self._transref():
            return self._transref().nest()

        # abort here if the journal already exists
        if os.path.exists(self.sjoin("journal")):
            raise repo.RepoError(_("journal already exists - run hg recover"))

        # save dirstate for rollback
        try:
            ds = self.opener("dirstate").read()
        except IOError:
            ds = ""
        self.opener("journal.dirstate", "w").write(ds)
        self.opener("journal.branch", "w").write(self.dirstate.branch())

        renames = [(self.sjoin("journal"), self.sjoin("undo")),
                   (self.join("journal.dirstate"), self.join("undo.dirstate")),
                   (self.join("journal.branch"), self.join("undo.branch"))]
        tr = transaction.transaction(self.ui.warn, self.sopener,
                                     self.sjoin("journal"),
                                     aftertrans(renames),
                                     self.store.createmode)
        self._transref = weakref.ref(tr)
        return tr

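    # Typical transaction lifecycle, as used by _commitctx() below: obtain
    # the transaction, write revlog data through it, then close; dropping
    # an unclosed transaction rolls back from the journal.
    #
    #   tr = repo.transaction()
    #   try:
    #       ...  # add revlog entries via tr
    #       tr.close()
    #   finally:
    #       del tr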
    def recover(self):
        l = self.lock()
        try:
            if os.path.exists(self.sjoin("journal")):
                self.ui.status(_("rolling back interrupted transaction\n"))
                transaction.rollback(self.sopener, self.sjoin("journal"))
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False
        finally:
            del l

    def rollback(self):
        wlock = lock = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if os.path.exists(self.sjoin("undo")):
                self.ui.status(_("rolling back last transaction\n"))
                transaction.rollback(self.sopener, self.sjoin("undo"))
                util.rename(self.join("undo.dirstate"), self.join("dirstate"))
                try:
                    branch = self.opener("undo.branch").read()
                    self.dirstate.setbranch(branch)
                except IOError:
                    self.ui.warn(_("Named branch could not be reset, "
                                   "current branch still is: %s\n")
                                 % util.tolocal(self.dirstate.branch()))
                self.invalidate()
                self.dirstate.invalidate()
            else:
                self.ui.warn(_("no rollback information available\n"))
        finally:
            del lock, wlock

    def invalidate(self):
        for a in "changelog manifest".split():
            if a in self.__dict__:
                delattr(self, a)
        self.tagscache = None
        self._tagstypecache = None
        self.nodetagscache = None
        self.branchcache = None
        self._ubranchcache = None
        self._branchcachetip = None

    def _lock(self, lockname, wait, releasefn, acquirefn, desc):
        try:
            l = lock.lock(lockname, 0, releasefn, desc=desc)
        except lock.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
                          releasefn, desc=desc)
        if acquirefn:
            acquirefn()
        return l

    def lock(self, wait=True):
        if self._lockref and self._lockref():
            return self._lockref()

        l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
                       _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l

    def wlock(self, wait=True):
        if self._wlockref and self._wlockref():
            return self._wlockref()

        l = self._lock(self.join("wlock"), wait, self.dirstate.write,
                       self.dirstate.invalidate, _('working directory of %s') %
                       self.origroot)
        self._wlockref = weakref.ref(l)
        return l

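    # Lock-ordering note: callers needing both locks take wlock() before
    # lock() (see rollback() above and commit() below); wlock guards the
    # working directory and dirstate, lock guards the store.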
    def filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        fn = fctx.path()
        t = fctx.data()
        fl = self.file(fn)
        fp1 = manifest1.get(fn, nullid)
        fp2 = manifest2.get(fn, nullid)

        meta = {}
        cp = fctx.renamed()
        if cp and cp[0] != fn:
            # Mark the new revision of this file as a copy of another
            # file.  This copy data will effectively act as a parent
            # of this new revision.  If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent.  For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4        as the merge base
            #
            cf = cp[0]
            cr = manifest1.get(cf)
            nfp = fp2

            if manifest2: # branch merge
                if fp2 == nullid: # copied on remote side
                    if fp1 != nullid or cf in manifest2:
                        cr = manifest2[cf]
                        nfp = fp1

            # find source in nearest ancestor if we've lost track
            if not cr:
                self.ui.debug(_(" %s: searching for copy revision for %s\n") %
                              (fn, cf))
                for a in self['.'].ancestors():
                    if cf in a:
                        cr = a[cf].filenode()
                        break

            self.ui.debug(_(" %s: copy %s:%s\n") % (fn, cf, hex(cr)))
            meta["copy"] = cf
            meta["copyrev"] = hex(cr)
            fp1, fp2 = nullid, nfp
        elif fp2 != nullid:
            # is one parent an ancestor of the other?
            fpa = fl.ancestor(fp1, fp2)
            if fpa == fp1:
                fp1, fp2 = fp2, nullid
            elif fpa == fp2:
                fp2 = nullid

        # is the file unmodified from the parent? report existing entry
        if fp2 == nullid and not fl.cmp(fp1, t) and not meta:
            return fp1

        changelist.append(fn)
        return fl.add(t, meta, tr, linkrev, fp1, fp2)

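    # The rename bookkeeping above ends up as filelog metadata; for a file
    # copied from 'foo' the entry is, schematically,
    #
    #   meta = {'copy': 'foo', 'copyrev': '<hex filelog node of foo>'}
    #
    # with fp1 set to nullid so readers know to consult the copy source.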
    def rawcommit(self, files, text, user, date, p1=None, p2=None, extra={}):
        if p1 is None:
            p1, p2 = self.dirstate.parents()
        return self.commit(files=files, text=text, user=user, date=date,
                           p1=p1, p2=p2, extra=extra, empty_ok=True)

    def commit(self, files=None, text="", user=None, date=None,
               match=None, force=False, force_editor=False,
               p1=None, p2=None, extra={}, empty_ok=False):
        wlock = lock = None
        if files:
            files = util.unique(files)
        try:
            wlock = self.wlock()
            lock = self.lock()
            use_dirstate = (p1 is None) # not rawcommit

            if use_dirstate:
                p1, p2 = self.dirstate.parents()
                update_dirstate = True

                if (not force and p2 != nullid and
                    (match and (match.files() or match.anypats()))):
                    raise util.Abort(_('cannot partially commit a merge '
                                       '(do not specify files or patterns)'))

                if files:
                    modified, removed = [], []
                    for f in files:
                        s = self.dirstate[f]
                        if s in 'nma':
                            modified.append(f)
                        elif s == 'r':
                            removed.append(f)
                        else:
                            self.ui.warn(_("%s not tracked!\n") % f)
                    changes = [modified, [], removed, [], []]
                else:
                    changes = self.status(match=match)
            else:
                p1, p2 = p1, p2 or nullid
                update_dirstate = (self.dirstate.parents()[0] == p1)
                changes = [files, [], [], [], []]

            ms = merge_.mergestate(self)
            for f in changes[0]:
                if f in ms and ms[f] == 'u':
                    raise util.Abort(_("unresolved merge conflicts "
                                       "(see hg resolve)"))
            wctx = context.workingctx(self, (p1, p2), text, user, date,
                                      extra, changes)
            return self._commitctx(wctx, force, force_editor, empty_ok,
                                   use_dirstate, update_dirstate)
        finally:
            del lock, wlock

    def commitctx(self, ctx):
        """Add a new revision to current repository.

        Revision information is passed in the context.memctx argument.
        commitctx() does not touch the working directory.
        """
        wlock = lock = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            return self._commitctx(ctx, force=True, force_editor=False,
                                   empty_ok=True, use_dirstate=False,
                                   update_dirstate=False)
        finally:
            del lock, wlock

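    # Illustrative commitctx() caller (not from this file): extensions such
    # as convert build a context.memctx and pass it in, roughly:
    #
    #   def getfilectx(repo, memctx, path):
    #       return context.memfilectx(path, 'contents\n', False, False, None)
    #
    #   ctx = context.memctx(repo, (p1, p2), 'commit text', ['a'],
    #                        getfilectx, 'user', None, {})
    #   repo.commitctx(ctx)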
    def _commitctx(self, wctx, force=False, force_editor=False, empty_ok=False,
                   use_dirstate=True, update_dirstate=True):
        tr = None
        valid = 0 # don't save the dirstate if this isn't set
        try:
            commit = util.sort(wctx.modified() + wctx.added())
            remove = wctx.removed()
            extra = wctx.extra().copy()
            branchname = extra['branch']
            user = wctx.user()
            text = wctx.description()

            p1, p2 = [p.node() for p in wctx.parents()]
            c1 = self.changelog.read(p1)
            c2 = self.changelog.read(p2)
            m1 = self.manifest.read(c1[0]).copy()
            m2 = self.manifest.read(c2[0])

            if use_dirstate:
                oldname = c1[5].get("branch") # stored in UTF-8
                if (not commit and not remove and not force and p2 == nullid
                    and branchname == oldname):
                    self.ui.status(_("nothing changed\n"))
                    return None

            xp1 = hex(p1)
            if p2 == nullid: xp2 = ''
            else: xp2 = hex(p2)

            self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)

            tr = self.transaction()
            trp = weakref.proxy(tr)

            # check in files
            new = {}
            changed = []
            linkrev = len(self)
            for f in commit:
                self.ui.note(f + "\n")
                try:
                    fctx = wctx.filectx(f)
                    newflags = fctx.flags()
                    new[f] = self.filecommit(fctx, m1, m2, linkrev, trp, changed)
                    if ((not changed or changed[-1] != f) and
                        m2.get(f) != new[f]):
                        # mention the file in the changelog if some
                        # flag changed, even if there was no content
                        # change.
                        if m1.flags(f) != newflags:
                            changed.append(f)
                    m1.set(f, newflags)
                    if use_dirstate:
                        self.dirstate.normal(f)

                except (OSError, IOError):
                    if use_dirstate:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    else:
                        remove.append(f)

            updated, added = [], []
            for f in util.sort(changed):
                if f in m1 or f in m2:
                    updated.append(f)
                else:
                    added.append(f)

            # update manifest
            m1.update(new)
            removed = []

            for f in util.sort(remove):
                if f in m1:
                    del m1[f]
                    removed.append(f)
                elif f in m2:
                    removed.append(f)
            mn = self.manifest.add(m1, trp, linkrev, c1[0], c2[0],
                                   (new, removed))

            # add changeset
            if (not empty_ok and not text) or force_editor:
                edittext = []
                if text:
                    edittext.append(text)
                    edittext.append("")
                edittext.append("") # Empty line between message and comments.
                edittext.append(_("HG: Enter commit message."
                                  " Lines beginning with 'HG:' are removed."))
                edittext.append("HG: --")
                edittext.append("HG: user: %s" % user)
                if p2 != nullid:
                    edittext.append("HG: branch merge")
                if branchname:
                    edittext.append("HG: branch '%s'" % util.tolocal(branchname))
                edittext.extend(["HG: added %s" % f for f in added])
                edittext.extend(["HG: changed %s" % f for f in updated])
                edittext.extend(["HG: removed %s" % f for f in removed])
                if not added and not updated and not removed:
                    edittext.append("HG: no files changed")
                edittext.append("")
                # run editor in the repository root
                olddir = os.getcwd()
                os.chdir(self.root)
                text = self.ui.edit("\n".join(edittext), user)
                os.chdir(olddir)

            lines = [line.rstrip() for line in text.rstrip().splitlines()]
            while lines and not lines[0]:
                del lines[0]
            if not lines and use_dirstate:
                raise util.Abort(_("empty commit message"))
            text = '\n'.join(lines)

            n = self.changelog.add(mn, changed + removed, text, trp, p1, p2,
                                   user, wctx.date(), extra)
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2)
            tr.close()

            if self.branchcache:
                self.branchtags()

            if use_dirstate or update_dirstate:
                self.dirstate.setparents(n)
            if use_dirstate:
                for f in removed:
                    self.dirstate.forget(f)
            valid = 1 # our dirstate updates are complete

            self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
            return n
        finally:
            if not valid: # don't save our updated dirstate
                self.dirstate.invalidate()
            del tr

    def walk(self, match, node=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function
        '''
        return self[node].walk(match)

    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False):
        """return status of files between two nodes or node and working directory

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.
        """

        def mfmatches(ctx):
            mf = ctx.manifest().copy()
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        if isinstance(node1, context.changectx):
            ctx1 = node1
        else:
            ctx1 = self[node1]
        if isinstance(node2, context.changectx):
            ctx2 = node2
        else:
            ctx2 = self[node2]

        working = ctx2.rev() is None
        parentworking = working and ctx1 == self['.']
        match = match or match_.always(self.root, self.getcwd())
        listignored, listclean, listunknown = ignored, clean, unknown

        # load earliest manifest first for caching reasons
        if not working and ctx2.rev() < ctx1.rev():
            ctx2.manifest()

        if not parentworking:
            def bad(f, msg):
                if f not in ctx1:
                    self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
                return False
            match.bad = bad

        if working: # we need to scan the working dir
            s = self.dirstate.status(match, listignored, listclean, listunknown)
            cmp, modified, added, removed, deleted, unknown, ignored, clean = s

            # check for any possibly clean files
            if parentworking and cmp:
                fixup = []
                # do a full compare of any files that might have changed
                for f in cmp:
                    if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
                        or ctx1[f].cmp(ctx2[f].data())):
                        modified.append(f)
                    else:
                        fixup.append(f)

                if listclean:
                    clean += fixup

                # update dirstate for files that are actually clean
                if fixup:
                    wlock = None
                    try:
                        try:
                            wlock = self.wlock(False)
                            for f in fixup:
                                self.dirstate.normal(f)
                        except lock.LockException:
                            pass
                    finally:
                        del wlock

        if not parentworking:
            mf1 = mfmatches(ctx1)
            if working:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                mf2 = mfmatches(self['.'])
                for f in cmp + modified + added:
                    mf2[f] = None
                    mf2.set(f, ctx2.flags(f))
                for f in removed:
                    if f in mf2:
1042 if f in mf2:
1043 del mf2[f]
1043 del mf2[f]
1044 else:
1044 else:
1045 # we are comparing two revisions
1045 # we are comparing two revisions
1046 deleted, unknown, ignored = [], [], []
1046 deleted, unknown, ignored = [], [], []
1047 mf2 = mfmatches(ctx2)
1047 mf2 = mfmatches(ctx2)
1048
1048
1049 modified, added, clean = [], [], []
1049 modified, added, clean = [], [], []
1050 for fn in mf2:
1050 for fn in mf2:
1051 if fn in mf1:
1051 if fn in mf1:
1052 if (mf1.flags(fn) != mf2.flags(fn) or
1052 if (mf1.flags(fn) != mf2.flags(fn) or
1053 (mf1[fn] != mf2[fn] and
1053 (mf1[fn] != mf2[fn] and
1054 (mf2[fn] or ctx1[fn].cmp(ctx2[fn].data())))):
1054 (mf2[fn] or ctx1[fn].cmp(ctx2[fn].data())))):
1055 modified.append(fn)
1055 modified.append(fn)
1056 elif listclean:
1056 elif listclean:
1057 clean.append(fn)
1057 clean.append(fn)
1058 del mf1[fn]
1058 del mf1[fn]
1059 else:
1059 else:
1060 added.append(fn)
1060 added.append(fn)
1061 removed = mf1.keys()
1061 removed = mf1.keys()
1062
1062
1063 r = modified, added, removed, deleted, unknown, ignored, clean
1063 r = modified, added, removed, deleted, unknown, ignored, clean
1064 [l.sort() for l in r]
1064 [l.sort() for l in r]
1065 return r
1065 return r
1066
1066
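    # A hypothetical usage sketch (not part of the original source): comparing
    # the working directory against its first parent. The name 'repo' is
    # assumed for illustration only; the seven lists come back in the order
    # shown below.
    #
    #   modified, added, removed, deleted, unknown, ignored, clean = \
    #       repo.status(clean=True)
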
    def add(self, list):
        wlock = self.wlock()
        try:
            rejected = []
            for f in list:
                p = self.wjoin(f)
                try:
                    st = os.lstat(p)
                except OSError:
                    self.ui.warn(_("%s does not exist!\n") % f)
                    rejected.append(f)
                    continue
                if st.st_size > 10000000:
                    self.ui.warn(_("%s: files over 10MB may cause memory and"
                                   " performance problems\n"
                                   "(use 'hg revert %s' to unadd the file)\n")
                                 % (f, f))
                if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
                    self.ui.warn(_("%s not added: only files and symlinks "
                                   "supported currently\n") % f)
                    rejected.append(p)
                elif self.dirstate[f] in 'amn':
                    self.ui.warn(_("%s already tracked!\n") % f)
                elif self.dirstate[f] == 'r':
                    self.dirstate.normallookup(f)
                else:
                    self.dirstate.add(f)
            return rejected
        finally:
            del wlock

    def forget(self, list):
        wlock = self.wlock()
        try:
            for f in list:
                if self.dirstate[f] != 'a':
                    self.ui.warn(_("%s not added!\n") % f)
                else:
                    self.dirstate.forget(f)
        finally:
            del wlock

    def remove(self, list, unlink=False):
        wlock = None
        try:
            if unlink:
                for f in list:
                    try:
                        util.unlink(self.wjoin(f))
                    except OSError, inst:
                        if inst.errno != errno.ENOENT:
                            raise
            wlock = self.wlock()
            for f in list:
                if unlink and os.path.exists(self.wjoin(f)):
                    self.ui.warn(_("%s still exists!\n") % f)
                elif self.dirstate[f] == 'a':
                    self.dirstate.forget(f)
                elif f not in self.dirstate:
                    self.ui.warn(_("%s not tracked!\n") % f)
                else:
                    self.dirstate.remove(f)
        finally:
            del wlock

    def undelete(self, list):
        wlock = None
        try:
            manifests = [self.manifest.read(self.changelog.read(p)[0])
                         for p in self.dirstate.parents() if p != nullid]
            wlock = self.wlock()
            for f in list:
                if self.dirstate[f] != 'r':
                    self.ui.warn(_("%s not removed!\n") % f)
                else:
                    m = f in manifests[0] and manifests[0] or manifests[1]
                    t = self.file(f).read(m[f])
                    self.wwrite(f, t, m.flags(f))
                    self.dirstate.normal(f)
        finally:
            del wlock

    def copy(self, source, dest):
        wlock = None
        try:
            p = self.wjoin(dest)
            if not (os.path.exists(p) or os.path.islink(p)):
                self.ui.warn(_("%s does not exist!\n") % dest)
            elif not (os.path.isfile(p) or os.path.islink(p)):
                self.ui.warn(_("copy failed: %s is not a file or a "
                               "symbolic link\n") % dest)
            else:
                wlock = self.wlock()
                if self.dirstate[dest] in '?r':
                    self.dirstate.add(dest)
                self.dirstate.copy(source, dest)
        finally:
            del wlock

    def heads(self, start=None):
        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        heads = [(-self.changelog.rev(h), h) for h in heads]
        return [n for (r, n) in util.sort(heads)]

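    # The decorate-sort-undecorate above is a small trick worth noting: by
    # negating the revision number before sorting, the newest head (highest
    # rev) comes out first without a reverse sort. A hypothetical
    # illustration, assuming heads at revs 5, 9 and 2:
    #
    #   [(-5, a), (-9, b), (-2, c)]  --sorted-->  [(-9, b), (-5, a), (-2, c)]
    #
    # so the returned nodes are [b, a, c], i.e. rev order 9, 5, 2.
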
    def branchheads(self, branch=None, start=None):
        if branch is None:
            branch = self[None].branch()
        branches = self.branchtags()
        if branch not in branches:
            return []
        # The basic algorithm is this:
        #
        # Start from the branch tip since there are no later revisions that can
        # possibly be in this branch, and the tip is a guaranteed head.
        #
        # Remember the tip's parents as the first ancestors, since these by
        # definition are not heads.
        #
        # Step backwards from the branch tip through all the revisions. We are
        # guaranteed by the rules of Mercurial that we will now be visiting the
        # nodes in reverse topological order (children before parents).
        #
        # If a revision is one of the ancestors of a head then we can toss it
        # out of the ancestors set (we've already found it and won't be
        # visiting it again) and put its parents in the ancestors set.
        #
        # Otherwise, if a revision is in the branch it's another head, since it
        # wasn't in the ancestor list of an existing head. So add it to the
        # head list, and add its parents to the ancestor list.
        #
        # If it is not in the branch, ignore it.
        #
        # Once we have a list of heads, use nodesbetween to filter out all the
        # heads that cannot be reached from startrev. There may be a more
        # efficient way to do this as part of the previous algorithm.

        set = util.set
        heads = [self.changelog.rev(branches[branch])]
        # Don't care if ancestors contains nullrev or not.
        ancestors = set(self.changelog.parentrevs(heads[0]))
        for rev in xrange(heads[0] - 1, nullrev, -1):
            if rev in ancestors:
                ancestors.update(self.changelog.parentrevs(rev))
                ancestors.remove(rev)
            elif self[rev].branch() == branch:
                heads.append(rev)
                ancestors.update(self.changelog.parentrevs(rev))
        heads = [self.changelog.node(rev) for rev in heads]
        if start is not None:
            heads = self.changelog.nodesbetween([start], heads)[2]
        return heads

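    # A small worked example of the sweep above (hypothetical revision
    # numbers, not from the original source). Suppose branch 'stable'
    # contains revs 2, 3 and 5, the branch tip is rev 5, and rev 5's parent
    # is rev 3:
    #
    #   start:  heads = [5], ancestors = {3, ...}
    #   rev 4:  not an ancestor, not on 'stable'  -> ignored
    #   rev 3:  in ancestors -> dropped, its parents join ancestors
    #   rev 2:  on 'stable' and not in ancestors  -> a second head
    #
    # yielding heads [5, 2] in a single reverse pass over the changelog.
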
    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while 1:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

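    # Why 'f' doubles above: for each (top, bottom) pair the loop records the
    # nodes that are 1, 2, 4, 8, ... first-parent steps below 'top', so the
    # reply grows only logarithmically with the distance to 'bottom'. A
    # hypothetical linear history top = n0 -> n1 -> n2 -> ... would give
    #
    #   between([(n0, n9)])  ->  [[n1, n2, n4, n8]]
    #
    # which is exactly the sample spacing the binary search in
    # findcommonincoming() below relies on.
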
    def findincoming(self, remote, base=None, heads=None, force=False):
        """Return list of roots of the subsets of missing nodes from remote

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side and that no child of a node of base exists
        in both remote and self.
        Furthermore base will be updated to include the nodes that exist in
        both self and remote but whose children do not exist in both.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads.

        All the ancestors of base are in self and in remote.
        All the descendants of the list returned are missing in self.
        (and so we know that the rest of the nodes are missing in remote, see
        outgoing)
        """
        return self.findcommonincoming(remote, base, heads, force)[1]

    def findcommonincoming(self, remote, base=None, heads=None, force=False):
        """Return a tuple (common, missing roots, heads) used to identify
        missing nodes from remote.

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side and that no child of a node of base exists
        in both remote and self.
        Furthermore base will be updated to include the nodes that exist in
        both self and remote but whose children do not exist in both.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads.

        All the ancestors of base are in self and in remote.
        """
        m = self.changelog.nodemap
        search = []
        fetch = {}
        seen = {}
        seenbranch = {}
        if base is None:
            base = {}

        if not heads:
            heads = remote.heads()

        if self.changelog.tip() == nullid:
            base[nullid] = 1
            if heads != [nullid]:
                return [nullid], [nullid], list(heads)
            return [nullid], [], []

        # assume we're closer to the tip than the root
        # and start by examining the heads
        self.ui.status(_("searching for changes\n"))

        unknown = []
        for h in heads:
            if h not in m:
                unknown.append(h)
            else:
                base[h] = 1

        heads = unknown
        if not unknown:
            return base.keys(), [], []

        req = dict.fromkeys(unknown)
        reqcnt = 0

        # search through remote branches
        # a 'branch' here is a linear segment of history, with four parts:
        # head, root, first parent, second parent
        # (a branch always has two parents (or none) by definition)
        unknown = remote.branches(unknown)
        while unknown:
            r = []
            while unknown:
                n = unknown.pop(0)
                if n[0] in seen:
                    continue

                self.ui.debug(_("examining %s:%s\n")
                              % (short(n[0]), short(n[1])))
                if n[0] == nullid: # found the end of the branch
                    pass
                elif n in seenbranch:
                    self.ui.debug(_("branch already found\n"))
                    continue
                elif n[1] and n[1] in m: # do we know the base?
                    self.ui.debug(_("found incomplete branch %s:%s\n")
                                  % (short(n[0]), short(n[1])))
                    search.append(n[0:2]) # schedule branch range for scanning
                    seenbranch[n] = 1
                else:
                    if n[1] not in seen and n[1] not in fetch:
                        if n[2] in m and n[3] in m:
                            self.ui.debug(_("found new changeset %s\n") %
                                          short(n[1]))
                            fetch[n[1]] = 1 # earliest unknown
                            for p in n[2:4]:
                                if p in m:
                                    base[p] = 1 # latest known

                    for p in n[2:4]:
                        if p not in req and p not in m:
                            r.append(p)
                            req[p] = 1
                seen[n[0]] = 1

            if r:
                reqcnt += 1
                self.ui.debug(_("request %d: %s\n") %
                              (reqcnt, " ".join(map(short, r))))
                for p in xrange(0, len(r), 10):
                    for b in remote.branches(r[p:p+10]):
                        self.ui.debug(_("received %s:%s\n") %
                                      (short(b[0]), short(b[1])))
                        unknown.append(b)

        # do binary search on the branches we found
        while search:
            newsearch = []
            reqcnt += 1
            for n, l in zip(search, remote.between(search)):
                l.append(n[1])
                p = n[0]
                f = 1
                for i in l:
                    self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
                    if i in m:
                        if f <= 2:
                            self.ui.debug(_("found new branch changeset %s\n") %
                                          short(p))
                            fetch[p] = 1
                            base[i] = 1
                        else:
                            self.ui.debug(_("narrowed branch search to %s:%s\n")
                                          % (short(p), short(i)))
                            newsearch.append((p, i))
                        break
                    p, f = i, f * 2
            search = newsearch

        # sanity check our fetch list
        for f in fetch.keys():
            if f in m:
                raise repo.RepoError(_("already have changeset ") + short(f[:4]))

        if base.keys() == [nullid]:
            if force:
                self.ui.warn(_("warning: repository is unrelated\n"))
            else:
                raise util.Abort(_("repository is unrelated"))

        self.ui.debug(_("found new changesets starting at ") +
                      " ".join([short(f) for f in fetch]) + "\n")

        self.ui.debug(_("%d total queries\n") % reqcnt)

        return base.keys(), fetch.keys(), heads

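    # Rough shape of the discovery exchange above (a summary note, not code
    # from the original source). Each kind of round trip narrows down what
    # the remote has:
    #
    #   1. remote.heads()    - seed the search with the remote's heads
    #   2. remote.branches() - walk linear segments toward known territory,
    #                          batching unknown parents ten per request
    #   3. remote.between()  - binary-search inside each partially known
    #                          segment, using the 1, 2, 4, ... samples
    #
    # 'base' ends up holding common nodes, 'fetch' the roots of what is
    # missing locally, and reqcnt counts the round trips spent.
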
    def findoutgoing(self, remote, base=None, heads=None, force=False):
        """Return list of nodes that are roots of subsets not in remote

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads, and return a second element which
        contains all remote heads which get new children.
        """
        if base is None:
            base = {}
            self.findincoming(remote, base, heads, force=force)

        self.ui.debug(_("common changesets up to ")
                      + " ".join(map(short, base.keys())) + "\n")

        remain = dict.fromkeys(self.changelog.nodemap)

        # prune everything remote has from the tree
        del remain[nullid]
        remove = base.keys()
        while remove:
            n = remove.pop(0)
            if n in remain:
                del remain[n]
                for p in self.changelog.parents(n):
                    remove.append(p)

        # find every node whose parents have been pruned
        subset = []
        # find every remote head that will get new children
        updated_heads = {}
        for n in remain:
            p1, p2 = self.changelog.parents(n)
            if p1 not in remain and p2 not in remain:
                subset.append(n)
            if heads:
                if p1 in heads:
                    updated_heads[p1] = True
                if p2 in heads:
                    updated_heads[p2] = True

        # this is the set of all roots we have to push
        if heads:
            return subset, updated_heads.keys()
        else:
            return subset

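    # The pruning above is a breadth-first sweep (a summary note, not code
    # from the original source): starting from the common nodes in 'base',
    # every ancestor is deleted from 'remain', so what is left is exactly the
    # region the remote lacks; the roots of that region are the nodes both of
    # whose parents were pruned away.
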
    def pull(self, remote, heads=None, force=False):
        lock = self.lock()
        try:
            common, fetch, rheads = self.findcommonincoming(remote, heads=heads,
                                                            force=force)
            if fetch == [nullid]:
                self.ui.status(_("requesting all changes\n"))

            if not fetch:
                self.ui.status(_("no changes found\n"))
                return 0

            if heads is None and remote.capable('changegroupsubset'):
                heads = rheads

            if heads is None:
                cg = remote.changegroup(fetch, 'pull')
            else:
                if not remote.capable('changegroupsubset'):
                    raise util.Abort(_("Partial pull cannot be done because "
                                       "other repository doesn't support "
                                       "changegroupsubset."))
                cg = remote.changegroupsubset(fetch, heads, 'pull')
            return self.addchangegroup(cg, 'pull', remote.url())
        finally:
            del lock

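    # A hypothetical usage sketch (names assumed for illustration only):
    #
    #   other = hg.repository(ui, 'http://example.com/repo')
    #   repo.pull(other)                  # pull everything new
    #   repo.pull(other, heads=[node])    # pull only ancestors of one head
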
    def push(self, remote, force=False, revs=None):
        # there are two ways to push to remote repo:
        #
        # addchangegroup assumes local user can lock remote
        # repo (local filesystem, old ssh servers).
        #
        # unbundle assumes local user cannot lock remote repo (new ssh
        # servers, http servers).

        if remote.capable('unbundle'):
            return self.push_unbundle(remote, force, revs)
        return self.push_addchangegroup(remote, force, revs)

    def prepush(self, remote, force, revs):
        common = {}
        remote_heads = remote.heads()
        inc = self.findincoming(remote, common, remote_heads, force=force)

        update, updated_heads = self.findoutgoing(remote, common, remote_heads)
        if revs is not None:
            msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
        else:
            bases, heads = update, self.changelog.heads()

        if not bases:
            self.ui.status(_("no changes found\n"))
            return None, 1
        elif not force:
            # check if we're creating new remote heads
            # to be a remote head after push, node must be either
            # - unknown locally
            # - a local outgoing head descended from update
            # - a remote head that's known locally and not
            #   ancestral to an outgoing head

            warn = 0

            if remote_heads == [nullid]:
                warn = 0
            elif not revs and len(heads) > len(remote_heads):
                warn = 1
            else:
                newheads = list(heads)
                for r in remote_heads:
                    if r in self.changelog.nodemap:
                        desc = self.changelog.heads(r, heads)
                        l = [h for h in heads if h in desc]
                        if not l:
                            newheads.append(r)
                    else:
                        newheads.append(r)
                if len(newheads) > len(remote_heads):
                    warn = 1

            if warn:
                self.ui.warn(_("abort: push creates new remote heads!\n"))
                self.ui.status(_("(did you forget to merge?"
                                 " use push -f to force)\n"))
                return None, 0
        elif inc:
            self.ui.warn(_("note: unsynced remote changes!\n"))

        if revs is None:
            # use the fast path, no race possible on push
            cg = self._changegroup(common.keys(), 'push')
        else:
            cg = self.changegroupsubset(update, revs, 'push')
        return cg, remote_heads

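    # Why the head bookkeeping above works (an illustration, not code from
    # the original source): every remote head stays in 'newheads' unless one
    # of our outgoing heads descends from it. For a remote with heads {A} and
    # a local clone that only committed D on top of A, heads == [D], D is in
    # desc, and newheads keeps one entry: no warning. Committing an unrelated
    # second head E as well makes newheads grow past len(remote_heads) and
    # triggers "push creates new remote heads!".
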
    def push_addchangegroup(self, remote, force, revs):
        lock = remote.lock()
        try:
            ret = self.prepush(remote, force, revs)
            if ret[0] is not None:
                cg, remote_heads = ret
                return remote.addchangegroup(cg, 'push', self.url())
            return ret[1]
        finally:
            del lock

    def push_unbundle(self, remote, force, revs):
        # local repo finds heads on server, finds out what revs it
        # must push. once revs transferred, if server finds it has
        # different heads (someone else won commit/push race), server
        # aborts.

        ret = self.prepush(remote, force, revs)
        if ret[0] is not None:
            cg, remote_heads = ret
            if force:
                remote_heads = ['force']
            return remote.unbundle(cg, remote_heads, 'push')
        return ret[1]

    def changegroupinfo(self, nodes, source):
        if self.ui.verbose or source == 'bundle':
            self.ui.status(_("%d changesets found\n") % len(nodes))
        if self.ui.debugflag:
            self.ui.debug(_("List of changesets:\n"))
            for node in nodes:
                self.ui.debug("%s\n" % hex(node))

    def changegroupsubset(self, bases, heads, source, extranodes=None):
        """This function generates a changegroup consisting of all the nodes
        that are descendants of any of the bases, and ancestors of any of
        the heads.

        It is fairly complex, as determining which filenodes and which
        manifest nodes need to be included for the changeset to be complete
        is non-trivial.

        Another wrinkle is doing the reverse, figuring out which changeset in
        the changegroup a particular filenode or manifestnode belongs to.

        The caller can specify some nodes that must be included in the
        changegroup using the extranodes argument. It should be a dict
        where the keys are the filenames (or 1 for the manifest), and the
        values are lists of (node, linknode) tuples, where node is a wanted
        node and linknode is the changelog node that should be transmitted as
        the linkrev.
        """

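        # A hypothetical extranodes value matching the docstring above (for
        # illustration only; fnode, mnode and clnode stand in for real
        # 20-byte nodeids):
        #
        #   extranodes = {
        #       'path/to/file.txt': [(fnode, clnode)],  # extra filenodes
        #       1:                  [(mnode, clnode)],  # extra manifest nodes
        #   }
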
        if extranodes is None:
            # can we go through the fast path?
            heads.sort()
            allheads = self.heads()
            allheads.sort()
            if heads == allheads:
                common = []
                # parents of bases are known from both sides
                for n in bases:
                    for p in self.changelog.parents(n):
                        if p != nullid:
                            common.append(p)
                return self._changegroup(common, source)

        self.hook('preoutgoing', throw=True, source=source)

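        # Why parents of bases form the common set (an explanatory note, not
        # in the original source): when 'heads' covers every head we have,
        # the changegroup must contain exactly the nodes between 'bases' and
        # the tips. Everything the recipient already has is then summarized
        # by the parents of the bases, so _changegroup() can walk forward
        # from those common nodes without the per-filenode pruning done
        # below.
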
        # Set up some initial variables
        # Make it easy to refer to self.changelog
        cl = self.changelog
        # msng is short for missing - compute the list of changesets in this
        # changegroup.
        msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
        self.changegroupinfo(msng_cl_lst, source)
        # Some bases may turn out to be superfluous, and some heads may be
        # too. nodesbetween will return the minimal set of bases and heads
        # necessary to re-create the changegroup.

        # Known heads are the list of heads that it is assumed the recipient
        # of this changegroup will know about.
        knownheads = {}
        # We assume that all parents of bases are known heads.
        for n in bases:
            for p in cl.parents(n):
                if p != nullid:
                    knownheads[p] = 1
        knownheads = knownheads.keys()
        if knownheads:
            # Now that we know what heads are known, we can compute which
            # changesets are known. The recipient must know about all
            # changesets required to reach the known heads from the null
            # changeset.
            has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
            junk = None
            # Transform the list into an ersatz set.
            has_cl_set = dict.fromkeys(has_cl_set)
        else:
            # If there were no known heads, the recipient cannot be assumed to
            # know about any changesets.
            has_cl_set = {}

        # Make it easy to refer to self.manifest
        mnfst = self.manifest
        # We don't know which manifests are missing yet
        msng_mnfst_set = {}
        # Nor do we know which filenodes are missing.
        msng_filenode_set = {}

        junk = mnfst.index[len(mnfst) - 1] # Get around a bug in lazyindex
        junk = None

        # A changeset always belongs to itself, so the changenode lookup
        # function for a changenode is identity.
        def identity(x):
            return x

        # A function generating function. Sets up an environment for the
        # inner function.
        def cmp_by_rev_func(revlog):
            # Compare two nodes by their revision number in the environment's
            # revision history. Since the revision number both represents the
            # most efficient order to read the nodes in, and represents a
            # topological sorting of the nodes, this function is often useful.
            def cmp_by_rev(a, b):
                return cmp(revlog.rev(a), revlog.rev(b))
            return cmp_by_rev

        # If we determine that a particular file or manifest node must be a
        # node that the recipient of the changegroup will already have, we can
        # also assume the recipient will have all the parents. This function
        # prunes them from the set of missing nodes.
        def prune_parents(revlog, hasset, msngset):
            haslst = hasset.keys()
            haslst.sort(cmp_by_rev_func(revlog))
            for node in haslst:
                parentlst = [p for p in revlog.parents(node) if p != nullid]
                while parentlst:
                    n = parentlst.pop()
                    if n not in hasset:
                        hasset[n] = 1
                        p = [p for p in revlog.parents(n) if p != nullid]
                        parentlst.extend(p)
            for n in hasset:
                msngset.pop(n, None)

        # This is a function generating function used to set up an environment
        # for the inner function to execute in.
        def manifest_and_file_collector(changedfileset):
            # This is an information gathering function that gathers
            # information from each changeset node that goes out as part of
            # the changegroup. The information gathered is a list of which
            # manifest nodes are potentially required (the recipient may
            # already have them) and the total list of all files which were
            # changed in any changeset in the changegroup.
            #
            # We also remember the first changenode each manifest was
            # referenced by, so we can later determine which changenode
            # 'owns' the manifest.
            def collect_manifests_and_files(clnode):
                c = cl.read(clnode)
                for f in c[3]:
                    # This is to make sure we only have one instance of each
                    # filename string for each filename.
                    changedfileset.setdefault(f, f)
                msng_mnfst_set.setdefault(c[0], clnode)
            return collect_manifests_and_files

        # Figure out which manifest nodes (of the ones we think might be part
        # of the changegroup) the recipient must know about and remove them
        # from the changegroup.
        def prune_manifests():
            has_mnfst_set = {}
            for n in msng_mnfst_set:
                # If a 'missing' manifest thinks it belongs to a changenode
                # the recipient is assumed to have, obviously the recipient
                # must have that manifest.
                linknode = cl.node(mnfst.linkrev(mnfst.rev(n)))
                if linknode in has_cl_set:
                    has_mnfst_set[n] = 1
            prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)

        # Use the information collected in collect_manifests_and_files to say
        # which changenode any manifestnode belongs to.
        def lookup_manifest_link(mnfstnode):
            return msng_mnfst_set[mnfstnode]

        # A function generating function that sets up the initial environment
        # for the inner function.
        def filenode_collector(changedfiles):
            next_rev = [0]
            # This gathers information from each manifestnode included in the
            # changegroup about which filenodes the manifest node references
            # so we can include those in the changegroup too.
            #
            # It also remembers which changenode each filenode belongs to. It
            # does this by assuming that a filenode belongs to the changenode
            # that the first manifest referencing it belongs to.
            def collect_msng_filenodes(mnfstnode):
                r = mnfst.rev(mnfstnode)
                if r == next_rev[0]:
                    # If the last rev we looked at was the one just previous,
                    # we only need to see a diff.
                    deltamf = mnfst.readdelta(mnfstnode)
                    # For each line in the delta
                    for f, fnode in deltamf.items():
                        f = changedfiles.get(f, None)
                        # And if the file is in the list of files we care
                        # about.
                        if f is not None:
                            # Get the changenode this manifest belongs to
                            clnode = msng_mnfst_set[mnfstnode]
                            # Create the set of filenodes for the file if
                            # there isn't one already.
                            ndset = msng_filenode_set.setdefault(f, {})
                            # And set the filenode's changelog node to the
                            # manifest's if it hasn't been set already.
                            ndset.setdefault(fnode, clnode)
                else:
                    # Otherwise we need a full manifest.
                    m = mnfst.read(mnfstnode)
                    # For every file we care about.
                    for f in changedfiles:
                        fnode = m.get(f, None)
                        # If it's in the manifest
                        if fnode is not None:
                            # See comments above.
                            clnode = msng_mnfst_set[mnfstnode]
                            ndset = msng_filenode_set.setdefault(f, {})
                            ndset.setdefault(fnode, clnode)
                # Remember the revision we hope to see next.
                next_rev[0] = r + 1
            return collect_msng_filenodes

        # We have a list of filenodes we think we need for a file; let's
        # remove all those we know the recipient must have.
        def prune_filenodes(f, filerevlog):
            msngset = msng_filenode_set[f]
            hasset = {}
            # If a 'missing' filenode thinks it belongs to a changenode we
            # assume the recipient must have, then the recipient must have
            # that filenode.
            for n in msngset:
                clnode = cl.node(filerevlog.linkrev(filerevlog.rev(n)))
                if clnode in has_cl_set:
                    hasset[n] = 1
            prune_parents(filerevlog, hasset, msngset)

        # A function generator function that sets up a context for the
        # inner function.
        def lookup_filenode_link_func(fname):
            msngset = msng_filenode_set[fname]
            # Look up the changenode the filenode belongs to.
            def lookup_filenode_link(fnode):
                return msngset[fnode]
            return lookup_filenode_link

        # Add the nodes that were explicitly requested.
        def add_extra_nodes(name, nodes):
            if not extranodes or name not in extranodes:
                return

            for node, linknode in extranodes[name]:
                if node not in nodes:
                    nodes[node] = linknode

1820 # Now that we have all theses utility functions to help out and
1821 # Now that we have all theses utility functions to help out and
1821 # logically divide up the task, generate the group.
1822 # logically divide up the task, generate the group.
        def gengroup():
            # The set of changed files starts empty.
            changedfiles = {}
            # Create a changenode group generator that will call our functions
            # back to look up the owning changenode and collect information.
            group = cl.group(msng_cl_lst, identity,
                             manifest_and_file_collector(changedfiles))
            for chnk in group:
                yield chnk

            # The list of manifests has been collected by the generator
            # calling our functions back.
            prune_manifests()
            add_extra_nodes(1, msng_mnfst_set)
            msng_mnfst_lst = msng_mnfst_set.keys()
            # Sort the manifestnodes by revision number.
            msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
                                filenode_collector(changedfiles))
            for chnk in group:
                yield chnk

            # These are no longer needed, dereference and toss the memory for
            # them.
            msng_mnfst_lst = None
            msng_mnfst_set.clear()

            if extranodes:
                for fname in extranodes:
                    if isinstance(fname, int):
                        continue
                    msng_filenode_set.setdefault(fname, {})
                    changedfiles[fname] = 1
            # Go through all our files in order sorted by name.
            for fname in util.sort(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                # Toss out the filenodes that the recipient isn't really
                # missing.
                if fname in msng_filenode_set:
                    prune_filenodes(fname, filerevlog)
                    add_extra_nodes(fname, msng_filenode_set[fname])
                    msng_filenode_lst = msng_filenode_set[fname].keys()
                else:
                    msng_filenode_lst = []
                # If any filenodes are left, generate the group for them,
                # otherwise don't bother.
                if len(msng_filenode_lst) > 0:
                    yield changegroup.chunkheader(len(fname))
                    yield fname
                    # Sort the filenodes by their revision number.
                    msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
                    # Create a group generator and only pass in a changenode
                    # lookup function as we need to collect no information
                    # from filenodes.
                    group = filerevlog.group(msng_filenode_lst,
                                             lookup_filenode_link_func(fname))
                    for chnk in group:
                        yield chnk
                if fname in msng_filenode_set:
                    # Don't need this anymore, toss it to free memory.
                    del msng_filenode_set[fname]
            # Signal that no more groups are left.
            yield changegroup.closechunk()

            if msng_cl_lst:
                self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)

        return util.chunkbuffer(gengroup())

    def changegroup(self, basenodes, source):
        # to avoid a race we use changegroupsubset() (issue1320)
        return self.changegroupsubset(basenodes, self.heads(), source)

    def _changegroup(self, common, source):
        """Generate a changegroup of all nodes that we have that a recipient
        doesn't.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them.

        common is the set of common nodes between remote and self"""

        self.hook('preoutgoing', throw=True, source=source)

        cl = self.changelog
        nodes = cl.findmissing(common)
        revset = dict.fromkeys([cl.rev(n) for n in nodes])
        self.changegroupinfo(nodes, source)

        def identity(x):
            return x

        def gennodelst(log):
            for r in log:
                if log.linkrev(r) in revset:
                    yield log.node(r)

        def changed_file_collector(changedfileset):
            def collect_changed_files(clnode):
                c = cl.read(clnode)
                for fname in c[3]:
                    changedfileset[fname] = 1
            return collect_changed_files

        def lookuprevlink_func(revlog):
            def lookuprevlink(n):
                return cl.node(revlog.linkrev(revlog.rev(n)))
            return lookuprevlink

        def gengroup():
            # construct a list of all changed files
            changedfiles = {}

            for chnk in cl.group(nodes, identity,
                                 changed_file_collector(changedfiles)):
                yield chnk

            mnfst = self.manifest
            nodeiter = gennodelst(mnfst)
            for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
                yield chnk

            for fname in util.sort(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                nodeiter = gennodelst(filerevlog)
                nodeiter = list(nodeiter)
                if nodeiter:
                    yield changegroup.chunkheader(len(fname))
                    yield fname
                    lookup = lookuprevlink_func(filerevlog)
                    for chnk in filerevlog.group(nodeiter, lookup):
                        yield chnk

            yield changegroup.closechunk()

        if nodes:
            self.hook('outgoing', node=hex(nodes[0]), source=source)

        return util.chunkbuffer(gengroup())

    def addchangegroup(self, source, srctype, url, emptyok=False):
        """add changegroup to repo.

        return values:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - fewer heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
        def csmap(x):
            self.ui.debug(_("add changeset %s\n") % short(x))
            return len(cl)

        def revmap(x):
            return cl.rev(x)

        if not source:
            return 0

        self.hook('prechangegroup', throw=True, source=srctype, url=url)

        changesets = files = revisions = 0

        # write changelog data to temp files so concurrent readers will not
        # see an inconsistent view
        cl = self.changelog
        cl.delayupdate()
        oldheads = len(cl.heads())

        tr = self.transaction()
        try:
            trp = weakref.proxy(tr)
            # pull off the changeset group
            self.ui.status(_("adding changesets\n"))
            cor = len(cl) - 1
            chunkiter = changegroup.chunkiter(source)
            if cl.addgroup(chunkiter, csmap, trp) is None and not emptyok:
                raise util.Abort(_("received changelog group is empty"))
            cnr = len(cl) - 1
            changesets = cnr - cor

            # pull off the manifest group
            self.ui.status(_("adding manifests\n"))
            chunkiter = changegroup.chunkiter(source)
            # no need to check for empty manifest group here:
            # if the result of the merge of 1 and 2 is the same in 3 and 4,
            # no new manifest will be created and the manifest group will
            # be empty during the pull
            self.manifest.addgroup(chunkiter, revmap, trp)

            # process the files
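            # Each file arrives as a chunk carrying the file name, followed
            # by its delta group; an empty chunk terminates the list of
            # files.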
            self.ui.status(_("adding file changes\n"))
            while 1:
                f = changegroup.getchunk(source)
                if not f:
                    break
                self.ui.debug(_("adding %s revisions\n") % f)
                fl = self.file(f)
                o = len(fl)
                chunkiter = changegroup.chunkiter(source)
                if fl.addgroup(chunkiter, revmap, trp) is None:
                    raise util.Abort(_("received file revlog group is empty"))
                revisions += len(fl) - o
                files += 1

            # make changelog see real files again
            cl.finalize(trp)

            newheads = len(self.changelog.heads())
            heads = ""
            if oldheads and newheads != oldheads:
                heads = _(" (%+d heads)") % (newheads - oldheads)

            self.ui.status(_("added %d changesets"
                             " with %d changes to %d files%s\n")
                           % (changesets, revisions, files, heads))

            if changesets > 0:
                self.hook('pretxnchangegroup', throw=True,
                          node=hex(self.changelog.node(cor+1)), source=srctype,
                          url=url)

            tr.close()
        finally:
            del tr

        if changesets > 0:
            # forcefully update the on-disk branch cache
            self.ui.debug(_("updating the branch cache\n"))
            self.branchtags()
            self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
                      source=srctype, url=url)

            for i in xrange(cor + 1, cnr + 1):
                self.hook("incoming", node=hex(self.changelog.node(i)),
                          source=srctype, url=url)

        # never return 0 here:
        if newheads < oldheads:
            return newheads - oldheads - 1
        else:
            return newheads - oldheads + 1


    def stream_in(self, remote):
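        # The wire format parsed below: the server first sends a status line
        # holding an integer code (0 = ok, 1 = operation forbidden,
        # 2 = remote lock failed), then a line "<total_files> <total_bytes>",
        # and then, for each file, a line "<name>\0<size>" followed by
        # exactly <size> bytes of raw store data.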
        fp = remote.stream_out()
        l = fp.readline()
        try:
            resp = int(l)
        except ValueError:
            raise util.UnexpectedOutput(
                _('Unexpected response from remote server:'), l)
        if resp == 1:
            raise util.Abort(_('operation forbidden by server'))
        elif resp == 2:
            raise util.Abort(_('locking the remote repository failed'))
        elif resp != 0:
            raise util.Abort(_('the server sent an unknown error code'))
        self.ui.status(_('streaming all changes\n'))
        l = fp.readline()
        try:
            total_files, total_bytes = map(int, l.split(' ', 1))
        except (ValueError, TypeError):
            raise util.UnexpectedOutput(
                _('Unexpected response from remote server:'), l)
        self.ui.status(_('%d files to transfer, %s of data\n') %
                       (total_files, util.bytecount(total_bytes)))
        start = time.time()
        for i in xrange(total_files):
            # XXX doesn't support '\n' or '\r' in filenames
            l = fp.readline()
            try:
                name, size = l.split('\0', 1)
                size = int(size)
            except (ValueError, TypeError):
                raise util.UnexpectedOutput(
                    _('Unexpected response from remote server:'), l)
            self.ui.debug(_('adding %s (%s)\n') % (name, util.bytecount(size)))
            ofp = self.sopener(name, 'w')
            for chunk in util.filechunkiter(fp, limit=size):
                ofp.write(chunk)
            ofp.close()
        elapsed = time.time() - start
        if elapsed <= 0:
            elapsed = 0.001
        self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                       (util.bytecount(total_bytes), elapsed,
                        util.bytecount(total_bytes / elapsed)))
        self.invalidate()
        return len(self.heads()) + 1

    def clone(self, remote, heads=[], stream=False):
        '''clone remote repository.

        keyword arguments:
        heads: list of revs to clone (forces use of pull)
        stream: use streaming clone if possible'''
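        # Callers such as 'hg clone --uncompressed' request the streaming
        # path by passing stream=True; if the server lacks the 'stream'
        # capability or specific heads were requested, we fall back to a
        # regular pull below.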

        # now, all clients that can request uncompressed clones can
        # read repo formats supported by all servers that can serve
        # them.

        # if revlog format changes, client will have to check version
        # and format flags on "stream" capability, and use
        # uncompressed only if compatible.

        if stream and not heads and remote.capable('stream'):
            return self.stream_in(remote)
        return self.pull(remote, heads)

# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for src, dest in renamefiles:
            util.rename(src, dest)
    return a
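
# For example, localrepository.transaction() hands aftertrans() the
# (journal, undo) rename pairs, so the journal files become the undo files
# once the transaction has been safely closed.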

def instance(ui, path, create):
    return localrepository(ui, util.drop_scheme('file', path), create)

def islocal(path):
    return True
@@ -1,82 +1,82 @@
 updating working directory
 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
 pushing to ../a
 searching for changes
 abort: push creates new remote heads!
 (did you forget to merge? use push -f to force)
 pulling from ../a
 searching for changes
 adding changesets
 adding manifests
 adding file changes
 added 1 changesets with 1 changes to 1 files (+1 heads)
 (run 'hg heads' to see heads, 'hg merge' to merge)
 pushing to ../a
 searching for changes
 abort: push creates new remote heads!
 (did you forget to merge? use push -f to force)
 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
 (branch merge, don't forget to commit)
 pushing to ../a
 searching for changes
 adding changesets
 adding manifests
 adding file changes
-added 2 changesets with 1 changes to 2 files
+added 2 changesets with 1 changes to 1 files
 adding foo
 updating working directory
 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
 created new head
 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
 created new head
 merging foo
 0 files updated, 1 files merged, 0 files removed, 0 files unresolved
 (branch merge, don't forget to commit)
 pushing to ../c
 searching for changes
 abort: push creates new remote heads!
 (did you forget to merge? use push -f to force)
 1
 pushing to ../c
 searching for changes
 no changes found
 0
 pushing to ../c
 searching for changes
 abort: push creates new remote heads!
 (did you forget to merge? use push -f to force)
 1
 pushing to ../c
 searching for changes
 abort: push creates new remote heads!
 (did you forget to merge? use push -f to force)
 1
 pushing to ../c
 searching for changes
 adding changesets
 adding manifests
 adding file changes
 added 2 changesets with 2 changes to 1 files (+2 heads)
 0
 pushing to ../c
 searching for changes
 adding changesets
 adding manifests
 adding file changes
 added 1 changesets with 1 changes to 1 files (-1 heads)
 0
 pushing to ../e
 searching for changes
 adding changesets
 adding manifests
 adding file changes
 added 1 changesets with 1 changes to 1 files
 0
 pushing to ../e
 searching for changes
 adding changesets
 adding manifests
 adding file changes
 added 1 changesets with 1 changes to 1 files
 0