findincoming: do the binary search in branches in parallel
Benoit Boissinot
r7208:acb87c5b default
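The hunk below only reaches line 1044 of localrepo.py, so the findincoming change named in the commit message is not itself visible in this excerpt. As a rough, stand-alone illustration of the idea (a minimal sketch, not the actual patch; none of these names are Mercurial APIs), here is what it means to run several binary searches in lockstep so that each round of probes costs one batched round trip instead of one round trip per branch:

def parallel_binary_search(branches, probe_batch):
    """Bisect several (lo, hi) ranges at once.

    branches: list of half-open (lo, hi) ranges, one per branch.
    probe_batch(points): answers "is this point known?" for many
    points in a single remote call.
    Returns the first unknown point of each range; the number of
    round trips is the max of log2(hi - lo) over branches rather
    than the sum.
    """
    lo = dict(enumerate(l for l, h in branches))
    hi = dict(enumerate(h for l, h in branches))
    open_ids = [i for i in lo if lo[i] < hi[i]]
    while open_ids:
        mids = [(i, (lo[i] + hi[i]) // 2) for i in open_ids]
        answers = probe_batch([m for _, m in mids])  # one round trip
        for (i, mid), known in zip(mids, answers):
            if known:
                lo[i] = mid + 1
            else:
                hi[i] = mid
        open_ids = [i for i in open_ids if lo[i] < hi[i]]
    return [lo[i] for i in range(len(branches))]

# toy demo: the "server" knows revisions 0-5 and 20-21
haves = set(range(6)) | set([20, 21])
probe = lambda points: [p in haves for p in points]
print parallel_binary_search([(0, 10), (15, 30)], probe)  # [6, 15]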
@@ -1,2101 +1,2103 @@
# localrepo.py - read/write repository class for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms
# of the GNU General Public License, incorporated herein by reference.

from node import bin, hex, nullid, nullrev, short
from i18n import _
import repo, changegroup
import changelog, dirstate, filelog, manifest, context, weakref
import lock, transaction, stat, errno, ui, store
import os, revlog, time, util, extensions, hook, inspect
import match as match_
import merge as merge_

class localrepository(repo.repository):
    capabilities = util.set(('lookup', 'changegroupsubset'))
    supported = ('revlogv1', 'store')

    def __init__(self, parentui, path=None, create=0):
        repo.repository.__init__(self)
        self.root = os.path.realpath(path)
        self.path = os.path.join(self.root, ".hg")
        self.origroot = path
        self.opener = util.opener(self.path)
        self.wopener = util.opener(self.root)

        if not os.path.isdir(self.path):
            if create:
                if not os.path.exists(path):
                    os.mkdir(path)
                os.mkdir(self.path)
                requirements = ["revlogv1"]
                if parentui.configbool('format', 'usestore', True):
                    os.mkdir(os.path.join(self.path, "store"))
                    requirements.append("store")
                    # create an invalid changelog
                self.opener("00changelog.i", "a").write(
                    '\0\0\0\2' # represents revlogv2
                    ' dummy changelog to prevent using the old repo layout'
                )
                reqfile = self.opener("requires", "w")
                for r in requirements:
                    reqfile.write("%s\n" % r)
                reqfile.close()
            else:
                raise repo.RepoError(_("repository %s not found") % path)
        elif create:
            raise repo.RepoError(_("repository %s already exists") % path)
        else:
            # find requirements
            requirements = []
            try:
                requirements = self.opener("requires").read().splitlines()
                for r in requirements:
                    if r not in self.supported:
                        raise repo.RepoError(_("requirement '%s' not supported") % r)
            except IOError, inst:
                if inst.errno != errno.ENOENT:
                    raise

        self.store = store.store(requirements, self.path, util.opener)
        self.spath = self.store.path
        self.sopener = self.store.opener
        self.sjoin = self.store.join
        self.opener.createmode = self.store.createmode

        self.ui = ui.ui(parentui=parentui)
        try:
            self.ui.readconfig(self.join("hgrc"), self.root)
            extensions.loadall(self.ui)
        except IOError:
            pass

        self.tagscache = None
        self._tagstypecache = None
        self.branchcache = None
        self._ubranchcache = None # UTF-8 version of branchcache
        self._branchcachetip = None
        self.nodetagscache = None
        self.filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

    def __getattr__(self, name):
        if name == 'changelog':
            self.changelog = changelog.changelog(self.sopener)
            self.sopener.defversion = self.changelog.version
            return self.changelog
        if name == 'manifest':
            self.changelog
            self.manifest = manifest.manifest(self.sopener)
            return self.manifest
        if name == 'dirstate':
            self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
            return self.dirstate
        else:
            raise AttributeError(name)

    def __getitem__(self, changeid):
        if changeid == None:
            return context.workingctx(self)
        return context.changectx(self, changeid)

    def __nonzero__(self):
        return True

    def __len__(self):
        return len(self.changelog)

    def __iter__(self):
        for i in xrange(len(self)):
            yield i

    def url(self):
        return 'file:' + self.root

    def hook(self, name, throw=False, **args):
        return hook.hook(self.ui, self, name, throw, **args)

    tag_disallowed = ':\r\n'

    def _tag(self, names, node, message, local, user, date, parent=None,
             extra={}):
        use_dirstate = parent is None

        if isinstance(names, str):
            allchars = names
            names = (names,)
        else:
            allchars = ''.join(names)
        for c in self.tag_disallowed:
            if c in allchars:
                raise util.Abort(_('%r cannot be used in a tag name') % c)

        for name in names:
            self.hook('pretag', throw=True, node=hex(node), tag=name,
                      local=local)

        def writetags(fp, names, munge, prevtags):
            fp.seek(0, 2)
            if prevtags and prevtags[-1] != '\n':
                fp.write('\n')
            for name in names:
                m = munge and munge(name) or name
                if self._tagstypecache and name in self._tagstypecache:
                    old = self.tagscache.get(name, nullid)
                    fp.write('%s %s\n' % (hex(old), m))
                fp.write('%s %s\n' % (hex(node), m))
            fp.close()

        prevtags = ''
        if local:
            try:
                fp = self.opener('localtags', 'r+')
            except IOError, err:
                fp = self.opener('localtags', 'a')
            else:
                prevtags = fp.read()

            # local tags are stored in the current charset
            writetags(fp, names, None, prevtags)
            for name in names:
                self.hook('tag', node=hex(node), tag=name, local=local)
            return

        if use_dirstate:
            try:
                fp = self.wfile('.hgtags', 'rb+')
            except IOError, err:
                fp = self.wfile('.hgtags', 'ab')
            else:
                prevtags = fp.read()
        else:
            try:
                prevtags = self.filectx('.hgtags', parent).data()
            except revlog.LookupError:
                pass
            fp = self.wfile('.hgtags', 'wb')
            if prevtags:
                fp.write(prevtags)

        # committed tags are stored in UTF-8
        writetags(fp, names, util.fromlocal, prevtags)

        if use_dirstate and '.hgtags' not in self.dirstate:
            self.add(['.hgtags'])

        tagnode = self.commit(['.hgtags'], message, user, date, p1=parent,
                              extra=extra)

        for name in names:
            self.hook('tag', node=hex(node), tag=name, local=local)

        return tagnode

    def tag(self, names, node, message, local, user, date):
        '''tag a revision with one or more symbolic names.

        names is a list of strings or, when adding a single tag, names may be a
        string.

        if local is True, the tags are stored in a per-repository file.
        otherwise, they are stored in the .hgtags file, and a new
        changeset is committed with the change.

        keyword arguments:

        local: whether to store tags in non-version-controlled file
               (default False)

        message: commit message to use if committing

        user: name of user to use if committing

        date: date tuple to use if committing'''

        for x in self.status()[:5]:
            if '.hgtags' in x:
                raise util.Abort(_('working copy of .hgtags is changed '
                                   '(please commit .hgtags manually)'))

        self._tag(names, node, message, local, user, date)

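As a usage illustration of the tag() API documented above (a hypothetical sketch, assuming the current directory is a Mercurial working copy; the tag names, user, and message are made up):

from mercurial import hg, ui
from mercurial.node import short

u = ui.ui()
repo = hg.repository(u, '.')            # assumes '.' is an hg checkout
node = repo.lookup('tip')               # resolve a revision via lookup()
# a global tag: commits a new changeset that updates .hgtags
repo.tag('v1.0', node, 'Added tag v1.0 for changeset %s' % short(node),
         False, 'editor <editor@example.com>', None)
# a local tag: only writes .hg/localtags, no commit is made
repo.tag('testing', node, '', True, 'editor <editor@example.com>', None)
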
    def tags(self):
        '''return a mapping of tag to node'''
        if self.tagscache:
            return self.tagscache

        globaltags = {}
        tagtypes = {}

        def readtags(lines, fn, tagtype):
            filetags = {}
            count = 0

            def warn(msg):
                self.ui.warn(_("%s, line %s: %s\n") % (fn, count, msg))

            for l in lines:
                count += 1
                if not l:
                    continue
                s = l.split(" ", 1)
                if len(s) != 2:
                    warn(_("cannot parse entry"))
                    continue
                node, key = s
                key = util.tolocal(key.strip()) # stored in UTF-8
                try:
                    bin_n = bin(node)
                except TypeError:
                    warn(_("node '%s' is not well formed") % node)
                    continue
                if bin_n not in self.changelog.nodemap:
                    warn(_("tag '%s' refers to unknown node") % key)
                    continue

                h = []
                if key in filetags:
                    n, h = filetags[key]
                    h.append(n)
                filetags[key] = (bin_n, h)

            for k, nh in filetags.items():
                if k not in globaltags:
                    globaltags[k] = nh
                    tagtypes[k] = tagtype
                    continue

                # we prefer the global tag if:
                #  it supercedes us OR
                #  mutual supercedes and it has a higher rank
                # otherwise we win because we're tip-most
                an, ah = nh
                bn, bh = globaltags[k]
                if (bn != an and an in bh and
                    (bn not in ah or len(bh) > len(ah))):
                    an = bn
                ah.extend([n for n in bh if n not in ah])
                globaltags[k] = an, ah
                tagtypes[k] = tagtype

        # read the tags file from each head, ending with the tip
        f = None
        for rev, node, fnode in self._hgtagsnodes():
            f = (f and f.filectx(fnode) or
                 self.filectx('.hgtags', fileid=fnode))
            readtags(f.data().splitlines(), f, "global")

        try:
            data = util.fromlocal(self.opener("localtags").read())
            # localtags are stored in the local character set
            # while the internal tag table is stored in UTF-8
            readtags(data.splitlines(), "localtags", "local")
        except IOError:
            pass

        self.tagscache = {}
        self._tagstypecache = {}
        for k,nh in globaltags.items():
            n = nh[0]
            if n != nullid:
                self.tagscache[k] = n
            self._tagstypecache[k] = tagtypes[k]
        self.tagscache['tip'] = self.changelog.tip()
        return self.tagscache

    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        self.tags()

        return self._tagstypecache.get(tagname)

    def _hgtagsnodes(self):
        heads = self.heads()
        heads.reverse()
        last = {}
        ret = []
        for node in heads:
            c = self[node]
            rev = c.rev()
            try:
                fnode = c.filenode('.hgtags')
            except revlog.LookupError:
                continue
            ret.append((rev, node, fnode))
            if fnode in last:
                ret[last[fnode]] = None
            last[fnode] = len(ret) - 1
        return [item for item in ret if item]

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        l = []
        for t, n in self.tags().items():
            try:
                r = self.changelog.rev(n)
            except:
                r = -2 # sort to the beginning of the list if unknown
            l.append((r, t, n))
        return [(t, n) for r, t, n in util.sort(l)]

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self.nodetagscache:
            self.nodetagscache = {}
            for t, n in self.tags().items():
                self.nodetagscache.setdefault(n, []).append(t)
        return self.nodetagscache.get(node, [])

    def _branchtags(self, partial, lrev):
        tiprev = len(self) - 1
        if lrev != tiprev:
            self._updatebranchcache(partial, lrev+1, tiprev+1)
            self._writebranchcache(partial, self.changelog.tip(), tiprev)

        return partial

    def branchtags(self):
        tip = self.changelog.tip()
        if self.branchcache is not None and self._branchcachetip == tip:
            return self.branchcache

        oldtip = self._branchcachetip
        self._branchcachetip = tip
        if self.branchcache is None:
            self.branchcache = {} # avoid recursion in changectx
        else:
            self.branchcache.clear() # keep using the same dict
        if oldtip is None or oldtip not in self.changelog.nodemap:
            partial, last, lrev = self._readbranchcache()
        else:
            lrev = self.changelog.rev(oldtip)
            partial = self._ubranchcache

        self._branchtags(partial, lrev)

        # the branch cache is stored on disk as UTF-8, but in the local
        # charset internally
        for k, v in partial.items():
            self.branchcache[util.tolocal(k)] = v
        self._ubranchcache = partial
        return self.branchcache

    def _readbranchcache(self):
        partial = {}
        try:
            f = self.opener("branch.cache")
            lines = f.read().split('\n')
            f.close()
        except (IOError, OSError):
            return {}, nullid, nullrev

        try:
            last, lrev = lines.pop(0).split(" ", 1)
            last, lrev = bin(last), int(lrev)
            if lrev >= len(self) or self[lrev].node() != last:
                # invalidate the cache
                raise ValueError('invalidating branch cache (tip differs)')
            for l in lines:
                if not l: continue
                node, label = l.split(" ", 1)
                partial[label.strip()] = bin(node)
        except (KeyboardInterrupt, util.SignalInterrupt):
            raise
        except Exception, inst:
            if self.ui.debugflag:
                self.ui.warn(str(inst), '\n')
            partial, last, lrev = {}, nullid, nullrev
        return partial, last, lrev

    def _writebranchcache(self, branches, tip, tiprev):
        try:
            f = self.opener("branch.cache", "w", atomictemp=True)
            f.write("%s %s\n" % (hex(tip), tiprev))
            for label, node in branches.iteritems():
                f.write("%s %s\n" % (hex(node), label))
            f.rename()
        except (IOError, OSError):
            pass

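The two methods above define the on-disk branch.cache format: a first line holding the tip hash and tip revision, then one "<node-hex> <label>" line per branch head. As a stand-alone sketch (a hypothetical helper, not part of Mercurial), the same format can be parsed like this:

def parse_branch_cache(data):
    """Parse branch.cache text as written by _writebranchcache above."""
    lines = data.split('\n')
    last, lrev = lines.pop(0).split(' ', 1)   # "<tip-hex> <tip-rev>"
    heads = {}
    for l in lines:
        if not l:
            continue
        node, label = l.split(' ', 1)         # "<node-hex> <label>"
        heads[label.strip()] = node
    return last, int(lrev), heads

sample = ('0123456789abcdef0123456789abcdef01234567 42\n'
          '89abcdef0123456789abcdef0123456789abcdef default\n')
print parse_branch_cache(sample)
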
    def _updatebranchcache(self, partial, start, end):
        for r in xrange(start, end):
            c = self[r]
            b = c.branch()
            partial[b] = c.node()

    def lookup(self, key):
        if key == '.':
            return self.dirstate.parents()[0]
        elif key == 'null':
            return nullid
        n = self.changelog._match(key)
        if n:
            return n
        if key in self.tags():
            return self.tags()[key]
        if key in self.branchtags():
            return self.branchtags()[key]
        n = self.changelog._partialmatch(key)
        if n:
            return n
        try:
            if len(key) == 20:
                key = hex(key)
        except:
            pass
        raise repo.RepoError(_("unknown revision '%s'") % key)

    def local(self):
        return True

    def join(self, f):
        return os.path.join(self.path, f)

    def wjoin(self, f):
        return os.path.join(self.root, f)

    def rjoin(self, f):
        return os.path.join(self.root, util.pconvert(f))

    def file(self, f):
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.sopener, f)

    def changectx(self, changeid):
        return self[changeid]

    def parents(self, changeid=None):
        '''get list of changectxs for parents of changeid'''
        return self[changeid].parents()

    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
           fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def wfile(self, f, mode='r'):
        return self.wopener(f, mode)

    def _link(self, f):
        return os.path.islink(self.wjoin(f))

    def _filter(self, filter, filename, data):
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                mf = util.matcher(self.root, "", [pat], [], [])[1]
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: util.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not inspect.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self.filterpats[filter] = l

        for mf, fn, cmd in self.filterpats[filter]:
            if mf(filename):
                self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

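_filter above resolves a filename to a filter command by scanning the configured (pattern, command) items: the first pattern that matches the filename wins, and a registered data filter whose name prefixes the command takes priority over shelling out. A stand-alone approximation of that dispatch (a sketch using fnmatch as a stand-in for util.matcher; the filter name and patterns are illustrative):

import fnmatch

def pick_filter(configitems, datafilters, filename):
    """Return (function, params) for the first matching pattern,
    mirroring the lookup order used by _filter above."""
    for pat, cmd in configitems:
        if not fnmatch.fnmatch(filename, pat):
            continue
        for name, fn in datafilters.items():
            if cmd.startswith(name):           # named filter overrides
                return fn, cmd[len(name):].lstrip()
        return None, cmd                       # fall back to a shell pipe
    return None, None

# toy demo with a hypothetical "cleverdecode:" data filter
filters = {'cleverdecode:': lambda s, params, **kw: s.replace('\r\n', '\n')}
items = [('**.txt', 'cleverdecode:'), ('**.gz', 'gunzip')]
fn, params = pick_filter(items, filters, 'docs/readme.txt')
print fn('a\r\nb', params)   # -> 'a\nb'
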
529 def adddatafilter(self, name, filter):
529 def adddatafilter(self, name, filter):
530 self._datafilters[name] = filter
530 self._datafilters[name] = filter
531
531
532 def wread(self, filename):
532 def wread(self, filename):
533 if self._link(filename):
533 if self._link(filename):
534 data = os.readlink(self.wjoin(filename))
534 data = os.readlink(self.wjoin(filename))
535 else:
535 else:
536 data = self.wopener(filename, 'r').read()
536 data = self.wopener(filename, 'r').read()
537 return self._filter("encode", filename, data)
537 return self._filter("encode", filename, data)
538
538
539 def wwrite(self, filename, data, flags):
539 def wwrite(self, filename, data, flags):
540 data = self._filter("decode", filename, data)
540 data = self._filter("decode", filename, data)
541 try:
541 try:
542 os.unlink(self.wjoin(filename))
542 os.unlink(self.wjoin(filename))
543 except OSError:
543 except OSError:
544 pass
544 pass
545 if 'l' in flags:
545 if 'l' in flags:
546 self.wopener.symlink(data, filename)
546 self.wopener.symlink(data, filename)
547 else:
547 else:
548 self.wopener(filename, 'w').write(data)
548 self.wopener(filename, 'w').write(data)
549 if 'x' in flags:
549 if 'x' in flags:
550 util.set_flags(self.wjoin(filename), False, True)
550 util.set_flags(self.wjoin(filename), False, True)
551
551
552 def wwritedata(self, filename, data):
552 def wwritedata(self, filename, data):
553 return self._filter("decode", filename, data)
553 return self._filter("decode", filename, data)
554
554
555 def transaction(self):
555 def transaction(self):
556 if self._transref and self._transref():
556 if self._transref and self._transref():
557 return self._transref().nest()
557 return self._transref().nest()
558
558
559 # abort here if the journal already exists
559 # abort here if the journal already exists
560 if os.path.exists(self.sjoin("journal")):
560 if os.path.exists(self.sjoin("journal")):
561 raise repo.RepoError(_("journal already exists - run hg recover"))
561 raise repo.RepoError(_("journal already exists - run hg recover"))
562
562
563 # save dirstate for rollback
563 # save dirstate for rollback
564 try:
564 try:
565 ds = self.opener("dirstate").read()
565 ds = self.opener("dirstate").read()
566 except IOError:
566 except IOError:
567 ds = ""
567 ds = ""
568 self.opener("journal.dirstate", "w").write(ds)
568 self.opener("journal.dirstate", "w").write(ds)
569 self.opener("journal.branch", "w").write(self.dirstate.branch())
569 self.opener("journal.branch", "w").write(self.dirstate.branch())
570
570
571 renames = [(self.sjoin("journal"), self.sjoin("undo")),
571 renames = [(self.sjoin("journal"), self.sjoin("undo")),
572 (self.join("journal.dirstate"), self.join("undo.dirstate")),
572 (self.join("journal.dirstate"), self.join("undo.dirstate")),
573 (self.join("journal.branch"), self.join("undo.branch"))]
573 (self.join("journal.branch"), self.join("undo.branch"))]
574 tr = transaction.transaction(self.ui.warn, self.sopener,
574 tr = transaction.transaction(self.ui.warn, self.sopener,
575 self.sjoin("journal"),
575 self.sjoin("journal"),
576 aftertrans(renames),
576 aftertrans(renames),
577 self.store.createmode)
577 self.store.createmode)
578 self._transref = weakref.ref(tr)
578 self._transref = weakref.ref(tr)
579 return tr
579 return tr
580
580
581 def recover(self):
581 def recover(self):
582 l = self.lock()
582 l = self.lock()
583 try:
583 try:
584 if os.path.exists(self.sjoin("journal")):
584 if os.path.exists(self.sjoin("journal")):
585 self.ui.status(_("rolling back interrupted transaction\n"))
585 self.ui.status(_("rolling back interrupted transaction\n"))
586 transaction.rollback(self.sopener, self.sjoin("journal"))
586 transaction.rollback(self.sopener, self.sjoin("journal"))
587 self.invalidate()
587 self.invalidate()
588 return True
588 return True
589 else:
589 else:
590 self.ui.warn(_("no interrupted transaction available\n"))
590 self.ui.warn(_("no interrupted transaction available\n"))
591 return False
591 return False
592 finally:
592 finally:
593 del l
593 del l
594
594
595 def rollback(self):
595 def rollback(self):
596 wlock = lock = None
596 wlock = lock = None
597 try:
597 try:
598 wlock = self.wlock()
598 wlock = self.wlock()
599 lock = self.lock()
599 lock = self.lock()
600 if os.path.exists(self.sjoin("undo")):
600 if os.path.exists(self.sjoin("undo")):
601 self.ui.status(_("rolling back last transaction\n"))
601 self.ui.status(_("rolling back last transaction\n"))
602 transaction.rollback(self.sopener, self.sjoin("undo"))
602 transaction.rollback(self.sopener, self.sjoin("undo"))
603 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
603 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
604 try:
604 try:
605 branch = self.opener("undo.branch").read()
605 branch = self.opener("undo.branch").read()
606 self.dirstate.setbranch(branch)
606 self.dirstate.setbranch(branch)
607 except IOError:
607 except IOError:
608 self.ui.warn(_("Named branch could not be reset, "
608 self.ui.warn(_("Named branch could not be reset, "
609 "current branch still is: %s\n")
609 "current branch still is: %s\n")
610 % util.tolocal(self.dirstate.branch()))
610 % util.tolocal(self.dirstate.branch()))
611 self.invalidate()
611 self.invalidate()
612 self.dirstate.invalidate()
612 self.dirstate.invalidate()
613 else:
613 else:
614 self.ui.warn(_("no rollback information available\n"))
614 self.ui.warn(_("no rollback information available\n"))
615 finally:
615 finally:
616 del lock, wlock
616 del lock, wlock
617
617
618 def invalidate(self):
618 def invalidate(self):
619 for a in "changelog manifest".split():
619 for a in "changelog manifest".split():
620 if a in self.__dict__:
620 if a in self.__dict__:
621 delattr(self, a)
621 delattr(self, a)
622 self.tagscache = None
622 self.tagscache = None
623 self._tagstypecache = None
623 self._tagstypecache = None
624 self.nodetagscache = None
624 self.nodetagscache = None
625 self.branchcache = None
625 self.branchcache = None
626 self._ubranchcache = None
626 self._ubranchcache = None
627 self._branchcachetip = None
627 self._branchcachetip = None
628
628
629 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
629 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
630 try:
630 try:
631 l = lock.lock(lockname, 0, releasefn, desc=desc)
631 l = lock.lock(lockname, 0, releasefn, desc=desc)
632 except lock.LockHeld, inst:
632 except lock.LockHeld, inst:
633 if not wait:
633 if not wait:
634 raise
634 raise
635 self.ui.warn(_("waiting for lock on %s held by %r\n") %
635 self.ui.warn(_("waiting for lock on %s held by %r\n") %
636 (desc, inst.locker))
636 (desc, inst.locker))
637 # default to 600 seconds timeout
637 # default to 600 seconds timeout
638 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
638 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
639 releasefn, desc=desc)
639 releasefn, desc=desc)
640 if acquirefn:
640 if acquirefn:
641 acquirefn()
641 acquirefn()
642 return l
642 return l
643
643
644 def lock(self, wait=True):
644 def lock(self, wait=True):
645 if self._lockref and self._lockref():
645 if self._lockref and self._lockref():
646 return self._lockref()
646 return self._lockref()
647
647
648 l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
648 l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
649 _('repository %s') % self.origroot)
649 _('repository %s') % self.origroot)
650 self._lockref = weakref.ref(l)
650 self._lockref = weakref.ref(l)
651 return l
651 return l
652
652
653 def wlock(self, wait=True):
653 def wlock(self, wait=True):
654 if self._wlockref and self._wlockref():
654 if self._wlockref and self._wlockref():
655 return self._wlockref()
655 return self._wlockref()
656
656
657 l = self._lock(self.join("wlock"), wait, self.dirstate.write,
657 l = self._lock(self.join("wlock"), wait, self.dirstate.write,
658 self.dirstate.invalidate, _('working directory of %s') %
658 self.dirstate.invalidate, _('working directory of %s') %
659 self.origroot)
659 self.origroot)
660 self._wlockref = weakref.ref(l)
660 self._wlockref = weakref.ref(l)
661 return l
661 return l
662
662
663 def filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
663 def filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
664 """
664 """
665 commit an individual file as part of a larger transaction
665 commit an individual file as part of a larger transaction
666 """
666 """
667
667
668 fn = fctx.path()
668 fn = fctx.path()
669 t = fctx.data()
669 t = fctx.data()
670 fl = self.file(fn)
670 fl = self.file(fn)
671 fp1 = manifest1.get(fn, nullid)
671 fp1 = manifest1.get(fn, nullid)
672 fp2 = manifest2.get(fn, nullid)
672 fp2 = manifest2.get(fn, nullid)
673
673
674 meta = {}
674 meta = {}
675 cp = fctx.renamed()
675 cp = fctx.renamed()
676 if cp and cp[0] != fn:
676 if cp and cp[0] != fn:
677 # Mark the new revision of this file as a copy of another
677 # Mark the new revision of this file as a copy of another
678 # file. This copy data will effectively act as a parent
678 # file. This copy data will effectively act as a parent
679 # of this new revision. If this is a merge, the first
679 # of this new revision. If this is a merge, the first
680 # parent will be the nullid (meaning "look up the copy data")
680 # parent will be the nullid (meaning "look up the copy data")
681 # and the second one will be the other parent. For example:
681 # and the second one will be the other parent. For example:
682 #
682 #
683 # 0 --- 1 --- 3 rev1 changes file foo
683 # 0 --- 1 --- 3 rev1 changes file foo
684 # \ / rev2 renames foo to bar and changes it
684 # \ / rev2 renames foo to bar and changes it
685 # \- 2 -/ rev3 should have bar with all changes and
685 # \- 2 -/ rev3 should have bar with all changes and
686 # should record that bar descends from
686 # should record that bar descends from
687 # bar in rev2 and foo in rev1
687 # bar in rev2 and foo in rev1
688 #
688 #
689 # this allows this merge to succeed:
689 # this allows this merge to succeed:
690 #
690 #
691 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
691 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
692 # \ / merging rev3 and rev4 should use bar@rev2
692 # \ / merging rev3 and rev4 should use bar@rev2
693 # \- 2 --- 4 as the merge base
693 # \- 2 --- 4 as the merge base
694 #
694 #
695
695
696 cf = cp[0]
696 cf = cp[0]
697 cr = manifest1.get(cf)
697 cr = manifest1.get(cf)
698 nfp = fp2
698 nfp = fp2
699
699
700 if manifest2: # branch merge
700 if manifest2: # branch merge
701 if fp2 == nullid: # copied on remote side
701 if fp2 == nullid: # copied on remote side
702 if fp1 != nullid or cf in manifest2:
702 if fp1 != nullid or cf in manifest2:
703 cr = manifest2[cf]
703 cr = manifest2[cf]
704 nfp = fp1
704 nfp = fp1
705
705
706 # find source in nearest ancestor if we've lost track
706 # find source in nearest ancestor if we've lost track
707 if not cr:
707 if not cr:
708 self.ui.debug(_(" %s: searching for copy revision for %s\n") %
708 self.ui.debug(_(" %s: searching for copy revision for %s\n") %
709 (fn, cf))
709 (fn, cf))
710 for a in self['.'].ancestors():
710 for a in self['.'].ancestors():
711 if cf in a:
711 if cf in a:
712 cr = a[cf].filenode()
712 cr = a[cf].filenode()
713 break
713 break
714
714
715 self.ui.debug(_(" %s: copy %s:%s\n") % (fn, cf, hex(cr)))
715 self.ui.debug(_(" %s: copy %s:%s\n") % (fn, cf, hex(cr)))
716 meta["copy"] = cf
716 meta["copy"] = cf
717 meta["copyrev"] = hex(cr)
717 meta["copyrev"] = hex(cr)
718 fp1, fp2 = nullid, nfp
718 fp1, fp2 = nullid, nfp
719 elif fp2 != nullid:
719 elif fp2 != nullid:
720 # is one parent an ancestor of the other?
720 # is one parent an ancestor of the other?
721 fpa = fl.ancestor(fp1, fp2)
721 fpa = fl.ancestor(fp1, fp2)
722 if fpa == fp1:
722 if fpa == fp1:
723 fp1, fp2 = fp2, nullid
723 fp1, fp2 = fp2, nullid
724 elif fpa == fp2:
724 elif fpa == fp2:
725 fp2 = nullid
725 fp2 = nullid
726
726
727 # is the file unmodified from the parent? report existing entry
727 # is the file unmodified from the parent? report existing entry
728 if fp2 == nullid and not fl.cmp(fp1, t) and not meta:
728 if fp2 == nullid and not fl.cmp(fp1, t) and not meta:
729 return fp1
729 return fp1
730
730
731 changelist.append(fn)
731 changelist.append(fn)
732 return fl.add(t, meta, tr, linkrev, fp1, fp2)
732 return fl.add(t, meta, tr, linkrev, fp1, fp2)
733
733
734 def rawcommit(self, files, text, user, date, p1=None, p2=None, extra={}):
734 def rawcommit(self, files, text, user, date, p1=None, p2=None, extra={}):
735 if p1 is None:
735 if p1 is None:
736 p1, p2 = self.dirstate.parents()
736 p1, p2 = self.dirstate.parents()
737 return self.commit(files=files, text=text, user=user, date=date,
737 return self.commit(files=files, text=text, user=user, date=date,
738 p1=p1, p2=p2, extra=extra, empty_ok=True)
738 p1=p1, p2=p2, extra=extra, empty_ok=True)
739
739
740 def commit(self, files=None, text="", user=None, date=None,
740 def commit(self, files=None, text="", user=None, date=None,
741 match=None, force=False, force_editor=False,
741 match=None, force=False, force_editor=False,
742 p1=None, p2=None, extra={}, empty_ok=False):
742 p1=None, p2=None, extra={}, empty_ok=False):
743 wlock = lock = None
743 wlock = lock = None
744 if files:
744 if files:
745 files = util.unique(files)
745 files = util.unique(files)
746 try:
746 try:
747 wlock = self.wlock()
747 wlock = self.wlock()
748 lock = self.lock()
748 lock = self.lock()
749 use_dirstate = (p1 is None) # not rawcommit
749 use_dirstate = (p1 is None) # not rawcommit
750
750
751 if use_dirstate:
751 if use_dirstate:
752 p1, p2 = self.dirstate.parents()
752 p1, p2 = self.dirstate.parents()
753 update_dirstate = True
753 update_dirstate = True
754
754
755 if (not force and p2 != nullid and
755 if (not force and p2 != nullid and
756 (match and (match.files() or match.anypats()))):
756 (match and (match.files() or match.anypats()))):
757 raise util.Abort(_('cannot partially commit a merge '
757 raise util.Abort(_('cannot partially commit a merge '
758 '(do not specify files or patterns)'))
758 '(do not specify files or patterns)'))
759
759
760 if files:
760 if files:
761 modified, removed = [], []
761 modified, removed = [], []
762 for f in files:
762 for f in files:
763 s = self.dirstate[f]
763 s = self.dirstate[f]
764 if s in 'nma':
764 if s in 'nma':
765 modified.append(f)
765 modified.append(f)
766 elif s == 'r':
766 elif s == 'r':
767 removed.append(f)
767 removed.append(f)
768 else:
768 else:
769 self.ui.warn(_("%s not tracked!\n") % f)
769 self.ui.warn(_("%s not tracked!\n") % f)
770 changes = [modified, [], removed, [], []]
770 changes = [modified, [], removed, [], []]
771 else:
771 else:
772 changes = self.status(match=match)
772 changes = self.status(match=match)
773 else:
773 else:
774 p1, p2 = p1, p2 or nullid
774 p1, p2 = p1, p2 or nullid
775 update_dirstate = (self.dirstate.parents()[0] == p1)
775 update_dirstate = (self.dirstate.parents()[0] == p1)
776 changes = [files, [], [], [], []]
776 changes = [files, [], [], [], []]
777
777
778 ms = merge_.mergestate(self)
778 ms = merge_.mergestate(self)
779 for f in changes[0]:
779 for f in changes[0]:
780 if f in ms and ms[f] == 'u':
780 if f in ms and ms[f] == 'u':
781 raise util.Abort(_("unresolved merge conflicts "
781 raise util.Abort(_("unresolved merge conflicts "
782 "(see hg resolve)"))
782 "(see hg resolve)"))
783 wctx = context.workingctx(self, (p1, p2), text, user, date,
783 wctx = context.workingctx(self, (p1, p2), text, user, date,
784 extra, changes)
784 extra, changes)
785 return self._commitctx(wctx, force, force_editor, empty_ok,
785 return self._commitctx(wctx, force, force_editor, empty_ok,
786 use_dirstate, update_dirstate)
786 use_dirstate, update_dirstate)
787 finally:
787 finally:
788 del lock, wlock
788 del lock, wlock
789
789
790 def commitctx(self, ctx):
790 def commitctx(self, ctx):
791 """Add a new revision to current repository.
791 """Add a new revision to current repository.
792
792
793 Revision information is passed in the context.memctx argument.
793 Revision information is passed in the context.memctx argument.
794 commitctx() does not touch the working directory.
794 commitctx() does not touch the working directory.
795 """
795 """
796 wlock = lock = None
796 wlock = lock = None
797 try:
797 try:
798 wlock = self.wlock()
798 wlock = self.wlock()
799 lock = self.lock()
799 lock = self.lock()
800 return self._commitctx(ctx, force=True, force_editor=False,
800 return self._commitctx(ctx, force=True, force_editor=False,
801 empty_ok=True, use_dirstate=False,
801 empty_ok=True, use_dirstate=False,
802 update_dirstate=False)
802 update_dirstate=False)
803 finally:
803 finally:
804 del lock, wlock
804 del lock, wlock
805
805
806 def _commitctx(self, wctx, force=False, force_editor=False, empty_ok=False,
806 def _commitctx(self, wctx, force=False, force_editor=False, empty_ok=False,
807 use_dirstate=True, update_dirstate=True):
807 use_dirstate=True, update_dirstate=True):
808 tr = None
808 tr = None
809 valid = 0 # don't save the dirstate if this isn't set
809 valid = 0 # don't save the dirstate if this isn't set
810 try:
810 try:
811 commit = util.sort(wctx.modified() + wctx.added())
811 commit = util.sort(wctx.modified() + wctx.added())
812 remove = wctx.removed()
812 remove = wctx.removed()
813 extra = wctx.extra().copy()
813 extra = wctx.extra().copy()
814 branchname = extra['branch']
814 branchname = extra['branch']
815 user = wctx.user()
815 user = wctx.user()
816 text = wctx.description()
816 text = wctx.description()
817
817
818 p1, p2 = [p.node() for p in wctx.parents()]
818 p1, p2 = [p.node() for p in wctx.parents()]
819 c1 = self.changelog.read(p1)
819 c1 = self.changelog.read(p1)
820 c2 = self.changelog.read(p2)
820 c2 = self.changelog.read(p2)
821 m1 = self.manifest.read(c1[0]).copy()
821 m1 = self.manifest.read(c1[0]).copy()
822 m2 = self.manifest.read(c2[0])
822 m2 = self.manifest.read(c2[0])
823
823
824 if use_dirstate:
824 if use_dirstate:
825 oldname = c1[5].get("branch") # stored in UTF-8
825 oldname = c1[5].get("branch") # stored in UTF-8
826 if (not commit and not remove and not force and p2 == nullid
826 if (not commit and not remove and not force and p2 == nullid
827 and branchname == oldname):
827 and branchname == oldname):
828 self.ui.status(_("nothing changed\n"))
828 self.ui.status(_("nothing changed\n"))
829 return None
829 return None
830
830
831 xp1 = hex(p1)
831 xp1 = hex(p1)
832 if p2 == nullid: xp2 = ''
832 if p2 == nullid: xp2 = ''
833 else: xp2 = hex(p2)
833 else: xp2 = hex(p2)
834
834
835 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
835 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
836
836
837 tr = self.transaction()
837 tr = self.transaction()
838 trp = weakref.proxy(tr)
838 trp = weakref.proxy(tr)
839
839
840 # check in files
840 # check in files
841 new = {}
841 new = {}
842 changed = []
842 changed = []
843 linkrev = len(self)
843 linkrev = len(self)
844 for f in commit:
844 for f in commit:
845 self.ui.note(f + "\n")
845 self.ui.note(f + "\n")
846 try:
846 try:
847 fctx = wctx.filectx(f)
847 fctx = wctx.filectx(f)
848 newflags = fctx.flags()
848 newflags = fctx.flags()
849 new[f] = self.filecommit(fctx, m1, m2, linkrev, trp, changed)
849 new[f] = self.filecommit(fctx, m1, m2, linkrev, trp, changed)
850 if ((not changed or changed[-1] != f) and
850 if ((not changed or changed[-1] != f) and
851 m2.get(f) != new[f]):
851 m2.get(f) != new[f]):
852 # mention the file in the changelog if some
852 # mention the file in the changelog if some
853 # flag changed, even if there was no content
853 # flag changed, even if there was no content
854 # change.
854 # change.
855 if m1.flags(f) != newflags:
855 if m1.flags(f) != newflags:
856 changed.append(f)
856 changed.append(f)
857 m1.set(f, newflags)
857 m1.set(f, newflags)
858 if use_dirstate:
858 if use_dirstate:
859 self.dirstate.normal(f)
859 self.dirstate.normal(f)
860
860
861 except (OSError, IOError):
861 except (OSError, IOError):
862 if use_dirstate:
862 if use_dirstate:
863 self.ui.warn(_("trouble committing %s!\n") % f)
863 self.ui.warn(_("trouble committing %s!\n") % f)
864 raise
864 raise
865 else:
865 else:
866 remove.append(f)
866 remove.append(f)
867
867
868 updated, added = [], []
868 updated, added = [], []
869 for f in util.sort(changed):
869 for f in util.sort(changed):
870 if f in m1 or f in m2:
870 if f in m1 or f in m2:
871 updated.append(f)
871 updated.append(f)
872 else:
872 else:
873 added.append(f)
873 added.append(f)
874
874
875 # update manifest
875 # update manifest
876 m1.update(new)
876 m1.update(new)
877 removed = []
877 removed = []
878
878
879 for f in util.sort(remove):
879 for f in util.sort(remove):
880 if f in m1:
880 if f in m1:
881 del m1[f]
881 del m1[f]
882 removed.append(f)
882 removed.append(f)
883 elif f in m2:
883 elif f in m2:
884 removed.append(f)
884 removed.append(f)
885 mn = self.manifest.add(m1, trp, linkrev, c1[0], c2[0],
885 mn = self.manifest.add(m1, trp, linkrev, c1[0], c2[0],
886 (new, removed))
886 (new, removed))
887
887
888 # add changeset
888 # add changeset
889 if (not empty_ok and not text) or force_editor:
889 if (not empty_ok and not text) or force_editor:
890 edittext = []
890 edittext = []
891 if text:
891 if text:
892 edittext.append(text)
892 edittext.append(text)
893 edittext.append("")
893 edittext.append("")
894 edittext.append("") # Empty line between message and comments.
894 edittext.append("") # Empty line between message and comments.
895 edittext.append(_("HG: Enter commit message."
895 edittext.append(_("HG: Enter commit message."
896 " Lines beginning with 'HG:' are removed."))
896 " Lines beginning with 'HG:' are removed."))
897 edittext.append("HG: --")
897 edittext.append("HG: --")
898 edittext.append("HG: user: %s" % user)
898 edittext.append("HG: user: %s" % user)
899 if p2 != nullid:
899 if p2 != nullid:
900 edittext.append("HG: branch merge")
900 edittext.append("HG: branch merge")
901 if branchname:
901 if branchname:
902 edittext.append("HG: branch '%s'" % util.tolocal(branchname))
902 edittext.append("HG: branch '%s'" % util.tolocal(branchname))
903 edittext.extend(["HG: added %s" % f for f in added])
903 edittext.extend(["HG: added %s" % f for f in added])
904 edittext.extend(["HG: changed %s" % f for f in updated])
904 edittext.extend(["HG: changed %s" % f for f in updated])
905 edittext.extend(["HG: removed %s" % f for f in removed])
905 edittext.extend(["HG: removed %s" % f for f in removed])
906 if not added and not updated and not removed:
906 if not added and not updated and not removed:
907 edittext.append("HG: no files changed")
907 edittext.append("HG: no files changed")
908 edittext.append("")
908 edittext.append("")
909 # run editor in the repository root
909 # run editor in the repository root
910 olddir = os.getcwd()
910 olddir = os.getcwd()
911 os.chdir(self.root)
911 os.chdir(self.root)
912 text = self.ui.edit("\n".join(edittext), user)
912 text = self.ui.edit("\n".join(edittext), user)
913 os.chdir(olddir)
913 os.chdir(olddir)
914
914
915 lines = [line.rstrip() for line in text.rstrip().splitlines()]
915 lines = [line.rstrip() for line in text.rstrip().splitlines()]
916 while lines and not lines[0]:
916 while lines and not lines[0]:
917 del lines[0]
917 del lines[0]
918 if not lines and use_dirstate:
918 if not lines and use_dirstate:
919 raise util.Abort(_("empty commit message"))
919 raise util.Abort(_("empty commit message"))
920 text = '\n'.join(lines)
920 text = '\n'.join(lines)
921
921
922 n = self.changelog.add(mn, changed + removed, text, trp, p1, p2,
922 n = self.changelog.add(mn, changed + removed, text, trp, p1, p2,
923 user, wctx.date(), extra)
923 user, wctx.date(), extra)
924 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
924 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
925 parent2=xp2)
925 parent2=xp2)
926 tr.close()
926 tr.close()
927
927
928 if self.branchcache:
928 if self.branchcache:
929 self.branchtags()
929 self.branchtags()
930
930
931 if use_dirstate or update_dirstate:
931 if use_dirstate or update_dirstate:
932 self.dirstate.setparents(n)
932 self.dirstate.setparents(n)
933 if use_dirstate:
933 if use_dirstate:
934 for f in removed:
934 for f in removed:
935 self.dirstate.forget(f)
935 self.dirstate.forget(f)
936 valid = 1 # our dirstate updates are complete
936 valid = 1 # our dirstate updates are complete
937
937
938 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
938 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
939 return n
939 return n
940 finally:
940 finally:
941 if not valid: # don't save our updated dirstate
941 if not valid: # don't save our updated dirstate
942 self.dirstate.invalidate()
942 self.dirstate.invalidate()
943 del tr
943 del tr
944
944
945 def walk(self, match, node=None):
945 def walk(self, match, node=None):
946 '''
946 '''
947 walk recursively through the directory tree or a given
947 walk recursively through the directory tree or a given
948 changeset, finding all files matched by the match
948 changeset, finding all files matched by the match
949 function
949 function
950 '''
950 '''
951 return self[node].walk(match)
951 return self[node].walk(match)
952
952
    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False):
        """return status of files between two nodes or node and working directory

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.
        """

        def mfmatches(ctx):
            mf = ctx.manifest().copy()
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        if isinstance(node1, context.changectx):
            ctx1 = node1
        else:
            ctx1 = self[node1]
        if isinstance(node2, context.changectx):
            ctx2 = node2
        else:
            ctx2 = self[node2]

        working = ctx2 == self[None]
        parentworking = working and ctx1 == self['.']
        match = match or match_.always(self.root, self.getcwd())
        listignored, listclean, listunknown = ignored, clean, unknown

        # load earliest manifest first for caching reasons
        if not working and ctx2.rev() < ctx1.rev():
            ctx2.manifest()

        if not parentworking:
            def bad(f, msg):
                if f not in ctx1:
                    self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
                return False
            match.bad = bad

        if working: # we need to scan the working dir
            s = self.dirstate.status(match, listignored, listclean, listunknown)
            cmp, modified, added, removed, deleted, unknown, ignored, clean = s

            # check for any possibly clean files
            if parentworking and cmp:
                fixup = []
                # do a full compare of any files that might have changed
                for f in cmp:
                    if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
                        or ctx1[f].cmp(ctx2[f].data())):
                        modified.append(f)
                    else:
                        fixup.append(f)

                if listclean:
                    clean += fixup

                # update dirstate for files that are actually clean
                if fixup:
                    wlock = None
                    try:
                        try:
                            wlock = self.wlock(False)
                            for f in fixup:
                                self.dirstate.normal(f)
                        except lock.LockException:
                            pass
                    finally:
                        del wlock

        if not parentworking:
            mf1 = mfmatches(ctx1)
            if working:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                mf2 = mfmatches(self['.'])
                for f in cmp + modified + added:
                    mf2[f] = None
                    mf2.set(f, ctx2.flags(f))
                for f in removed:
                    if f in mf2:
                        del mf2[f]
            else:
                # we are comparing two revisions
                deleted, unknown, ignored = [], [], []
                mf2 = mfmatches(ctx2)

            modified, added, clean = [], [], []
            for fn in mf2:
                if fn in mf1:
                    if (mf1.flags(fn) != mf2.flags(fn) or
                        (mf1[fn] != mf2[fn] and
                         (mf2[fn] or ctx1[fn].cmp(ctx2[fn].data())))):
                        modified.append(fn)
                    elif listclean:
                        clean.append(fn)
                    del mf1[fn]
                else:
                    added.append(fn)
            removed = mf1.keys()

        r = modified, added, removed, deleted, unknown, ignored, clean
        [l.sort() for l in r]
        return r

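    # A hypothetical usage sketch (the 'repo' name is assumed): status()
    # always returns the seven lists in this order; the ignored, clean and
    # unknown lists stay empty unless explicitly requested.
    #
    #   modified, added, removed, deleted, unknown, ignored, clean = \
    #       repo.status(ignored=True, clean=True, unknown=True)
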
    def add(self, list):
        wlock = self.wlock()
        try:
            rejected = []
            for f in list:
                p = self.wjoin(f)
                try:
                    st = os.lstat(p)
                except:
                    self.ui.warn(_("%s does not exist!\n") % f)
                    rejected.append(f)
                    continue
                if st.st_size > 10000000:
                    self.ui.warn(_("%s: files over 10MB may cause memory and"
                                   " performance problems\n"
                                   "(use 'hg revert %s' to unadd the file)\n")
                                   % (f, f))
                if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
                    self.ui.warn(_("%s not added: only files and symlinks "
                                   "supported currently\n") % f)
                    rejected.append(p)
                elif self.dirstate[f] in 'amn':
                    self.ui.warn(_("%s already tracked!\n") % f)
                elif self.dirstate[f] == 'r':
                    self.dirstate.normallookup(f)
                else:
                    self.dirstate.add(f)
            return rejected
        finally:
            del wlock

    def forget(self, list):
        wlock = self.wlock()
        try:
            for f in list:
                if self.dirstate[f] != 'a':
                    self.ui.warn(_("%s not added!\n") % f)
                else:
                    self.dirstate.forget(f)
        finally:
            del wlock

    def remove(self, list, unlink=False):
        wlock = None
        try:
            if unlink:
                for f in list:
                    try:
                        util.unlink(self.wjoin(f))
                    except OSError, inst:
                        if inst.errno != errno.ENOENT:
                            raise
            wlock = self.wlock()
            for f in list:
                if unlink and os.path.exists(self.wjoin(f)):
                    self.ui.warn(_("%s still exists!\n") % f)
                elif self.dirstate[f] == 'a':
                    self.dirstate.forget(f)
                elif f not in self.dirstate:
                    self.ui.warn(_("%s not tracked!\n") % f)
                else:
                    self.dirstate.remove(f)
        finally:
            del wlock

    def undelete(self, list):
        wlock = None
        try:
            manifests = [self.manifest.read(self.changelog.read(p)[0])
                         for p in self.dirstate.parents() if p != nullid]
            wlock = self.wlock()
            for f in list:
                if self.dirstate[f] != 'r':
                    self.ui.warn(_("%s not removed!\n") % f)
                else:
                    m = f in manifests[0] and manifests[0] or manifests[1]
                    t = self.file(f).read(m[f])
                    self.wwrite(f, t, m.flags(f))
                    self.dirstate.normal(f)
        finally:
            del wlock

    def copy(self, source, dest):
        wlock = None
        try:
            p = self.wjoin(dest)
            if not (os.path.exists(p) or os.path.islink(p)):
                self.ui.warn(_("%s does not exist!\n") % dest)
            elif not (os.path.isfile(p) or os.path.islink(p)):
                self.ui.warn(_("copy failed: %s is not a file or a "
                               "symbolic link\n") % dest)
            else:
                wlock = self.wlock()
                if self.dirstate[dest] in '?r':
                    self.dirstate.add(dest)
                self.dirstate.copy(source, dest)
        finally:
            del wlock

    def heads(self, start=None):
        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        heads = [(-self.changelog.rev(h), h) for h in heads]
        return [n for (r, n) in util.sort(heads)]

    def branchheads(self, branch=None, start=None):
        if branch is None:
            branch = self[None].branch()
        branches = self.branchtags()
        if branch not in branches:
            return []
        # The basic algorithm is this:
        #
        # Start from the branch tip since there are no later revisions that can
        # possibly be in this branch, and the tip is a guaranteed head.
        #
        # Remember the tip's parents as the first ancestors, since these by
        # definition are not heads.
        #
        # Step backwards from the branch tip through all the revisions. We are
        # guaranteed by the rules of Mercurial that we will now be visiting the
        # nodes in reverse topological order (children before parents).
        #
        # If a revision is one of the ancestors of a head then we can toss it
        # out of the ancestors set (we've already found it and won't be
        # visiting it again) and put its parents in the ancestors set.
        #
        # Otherwise, if a revision is in the branch it's another head, since it
        # wasn't in the ancestor list of an existing head. So add it to the
        # head list, and add its parents to the ancestor list.
        #
        # If it is not in the branch, ignore it.
        #
        # Once we have a list of heads, use nodesbetween to filter out all the
        # heads that cannot be reached from startrev. There may be a more
        # efficient way to do this as part of the previous algorithm.

        set = util.set
        heads = [self.changelog.rev(branches[branch])]
        # Don't care if ancestors contains nullrev or not.
        ancestors = set(self.changelog.parentrevs(heads[0]))
        for rev in xrange(heads[0] - 1, nullrev, -1):
            if rev in ancestors:
                ancestors.update(self.changelog.parentrevs(rev))
                ancestors.remove(rev)
            elif self[rev].branch() == branch:
                heads.append(rev)
                ancestors.update(self.changelog.parentrevs(rev))
        heads = [self.changelog.node(rev) for rev in heads]
        if start is not None:
            heads = self.changelog.nodesbetween([start], heads)[2]
        return heads

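    # A small worked illustration of the loop above (revision numbers and
    # parents are made up). Suppose the branch tip is rev 5 with parents
    # (4, -1), and rev 3 is on the same branch without being an ancestor
    # of 5:
    #
    #   start:  heads = [5], ancestors = {4, -1}
    #   rev 4:  in ancestors, so swap it for its parents, say {2, -1}
    #   rev 3:  not an ancestor but on the branch: heads = [5, 3], and
    #           3's parents join the ancestor set
    #   rev 2:  in ancestors, swapped for its parents, and so on down to
    #           nullrev, leaving exactly the heads of the branch.
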
    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while 1:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

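    # An illustrative trace (distances made up): for one (top, bottom) pair
    # the loop above walks first parents from top towards bottom, keeping
    # the nodes that lie 1, 2, 4, 8, ... steps below top. For a segment of
    # length 10 it would return the nodes 1, 2, 4 and 8 steps down: the
    # exponentially spaced samples that findincoming() later bisects.
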
    def findincoming(self, remote, base=None, heads=None, force=False):
        """Return list of roots of the subsets of missing nodes from remote

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side and that no child of a node of base exists
        in both remote and self.
        Furthermore, base will be updated to include the nodes that exist
        in both self and remote but none of whose children exist in both.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads.

        All the ancestors of base are in self and in remote.
        All the descendants of the list returned are missing in self.
        (and so we know that the rest of the nodes are missing in remote, see
        outgoing)
        """
        m = self.changelog.nodemap
        search = []
        fetch = {}
        seen = {}
        seenbranch = {}
        if base == None:
            base = {}

        if not heads:
            heads = remote.heads()

        if self.changelog.tip() == nullid:
            base[nullid] = 1
            if heads != [nullid]:
                return [nullid]
            return []

        # assume we're closer to the tip than the root
        # and start by examining the heads
        self.ui.status(_("searching for changes\n"))

        unknown = []
        for h in heads:
            if h not in m:
                unknown.append(h)
            else:
                base[h] = 1

        if not unknown:
            return []

        req = dict.fromkeys(unknown)
        reqcnt = 0

        # search through remote branches
        # a 'branch' here is a linear segment of history, with four parts:
        # head, root, first parent, second parent
        # (a branch always has two parents (or none) by definition)
        unknown = remote.branches(unknown)
        while unknown:
            r = []
            while unknown:
                n = unknown.pop(0)
                if n[0] in seen:
                    continue

                self.ui.debug(_("examining %s:%s\n")
                              % (short(n[0]), short(n[1])))
                if n[0] == nullid: # found the end of the branch
                    pass
                elif n in seenbranch:
                    self.ui.debug(_("branch already found\n"))
                    continue
                elif n[1] and n[1] in m: # do we know the base?
                    self.ui.debug(_("found incomplete branch %s:%s\n")
                                  % (short(n[0]), short(n[1])))
                    search.append(n) # schedule branch range for scanning
                    seenbranch[n] = 1
                else:
                    if n[1] not in seen and n[1] not in fetch:
                        if n[2] in m and n[3] in m:
                            self.ui.debug(_("found new changeset %s\n") %
                                          short(n[1]))
                            fetch[n[1]] = 1 # earliest unknown
                        for p in n[2:4]:
                            if p in m:
                                base[p] = 1 # latest known

                    for p in n[2:4]:
                        if p not in req and p not in m:
                            r.append(p)
                            req[p] = 1
                seen[n[0]] = 1

            if r:
                reqcnt += 1
                self.ui.debug(_("request %d: %s\n") %
                              (reqcnt, " ".join(map(short, r))))
                for p in xrange(0, len(r), 10):
                    for b in remote.branches(r[p:p+10]):
                        self.ui.debug(_("received %s:%s\n") %
                                      (short(b[0]), short(b[1])))
                        unknown.append(b)

        # do binary search on the branches we found
        search = [(t, b) for (t, b, p1, p2) in search]
        while search:
            newsearch = []
            reqcnt += 1
            for n, l in zip(search, remote.between(search)):
                l.append(n[1])
                p = n[0]
                f = 1
                for i in l:
                    self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
                    if i in m:
                        if f <= 2:
                            self.ui.debug(_("found new branch changeset %s\n") %
                                          short(p))
                            fetch[p] = 1
                            base[i] = 1
                        else:
                            self.ui.debug(_("narrowed branch search to %s:%s\n")
                                          % (short(p), short(i)))
                            newsearch.append((p, i))
                        break
                    p, f = i, f * 2
            search = newsearch

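        # A note on the batching above (illustrative, with made-up sizes):
        # each pass now issues a single between() request covering every
        # branch still being narrowed, instead of one request per branch per
        # step. remote.between() takes a list of (top, bottom) pairs and
        # returns one sample list per pair, so the zip() re-associates each
        # branch with its answer. For S branches whose segments have length
        # about L, the number of round trips drops from roughly S * log(L)
        # to roughly log(L).
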
        # sanity check our fetch list
        for f in fetch.keys():
            if f in m:
                raise repo.RepoError(_("already have changeset ") + short(f[:4]))

        if base.keys() == [nullid]:
            if force:
                self.ui.warn(_("warning: repository is unrelated\n"))
            else:
                raise util.Abort(_("repository is unrelated"))

        self.ui.debug(_("found new changesets starting at ") +
                      " ".join([short(f) for f in fetch]) + "\n")

        self.ui.debug(_("%d total queries\n") % reqcnt)

        return fetch.keys()

    def findoutgoing(self, remote, base=None, heads=None, force=False):
        """Return list of nodes that are roots of subsets not in remote

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads, and return a second element which
        contains all remote heads which get new children.
        """
        if base == None:
            base = {}
            self.findincoming(remote, base, heads, force=force)

        self.ui.debug(_("common changesets up to ")
                      + " ".join(map(short, base.keys())) + "\n")

        remain = dict.fromkeys(self.changelog.nodemap)

        # prune everything remote has from the tree
        del remain[nullid]
        remove = base.keys()
        while remove:
            n = remove.pop(0)
            if n in remain:
                del remain[n]
                for p in self.changelog.parents(n):
                    remove.append(p)

        # find every node whose parents have been pruned
        subset = []
        # find every remote head that will get new children
        updated_heads = {}
        for n in remain:
            p1, p2 = self.changelog.parents(n)
            if p1 not in remain and p2 not in remain:
                subset.append(n)
            if heads:
                if p1 in heads:
                    updated_heads[p1] = True
                if p2 in heads:
                    updated_heads[p2] = True

        # this is the set of all roots we have to push
        if heads:
            return subset, updated_heads.keys()
        else:
            return subset

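    # A hypothetical usage sketch (names assumed): when no base dict is
    # supplied, findoutgoing() runs its own findincoming() pass first.
    #
    #   roots = repo.findoutgoing(remote)   # roots of what remote lacks
    #   roots, updated = repo.findoutgoing(remote, heads=remote.heads())
    #   # 'updated' lists the remote heads that would gain new children
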
    def pull(self, remote, heads=None, force=False):
        lock = self.lock()
        try:
            fetch = self.findincoming(remote, heads=heads, force=force)
            if fetch == [nullid]:
                self.ui.status(_("requesting all changes\n"))

            if not fetch:
                self.ui.status(_("no changes found\n"))
                return 0

            if heads is None:
                cg = remote.changegroup(fetch, 'pull')
            else:
                if 'changegroupsubset' not in remote.capabilities:
                    raise util.Abort(_("Partial pull cannot be done because other repository doesn't support changegroupsubset."))
                cg = remote.changegroupsubset(fetch, heads, 'pull')
            return self.addchangegroup(cg, 'pull', remote.url())
        finally:
            del lock

    def push(self, remote, force=False, revs=None):
        # there are two ways to push to remote repo:
        #
        # addchangegroup assumes local user can lock remote
        # repo (local filesystem, old ssh servers).
        #
        # unbundle assumes local user cannot lock remote repo (new ssh
        # servers, http servers).

        if remote.capable('unbundle'):
            return self.push_unbundle(remote, force, revs)
        return self.push_addchangegroup(remote, force, revs)

    def prepush(self, remote, force, revs):
        base = {}
        remote_heads = remote.heads()
        inc = self.findincoming(remote, base, remote_heads, force=force)

        update, updated_heads = self.findoutgoing(remote, base, remote_heads)
        if revs is not None:
            msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
        else:
            bases, heads = update, self.changelog.heads()

        if not bases:
            self.ui.status(_("no changes found\n"))
            return None, 1
        elif not force:
            # check if we're creating new remote heads
            # to be a remote head after push, node must be either
            # - unknown locally
            # - a local outgoing head descended from update
            # - a remote head that's known locally and not
            #   ancestral to an outgoing head

            warn = 0

            if remote_heads == [nullid]:
                warn = 0
            elif not revs and len(heads) > len(remote_heads):
                warn = 1
            else:
                newheads = list(heads)
                for r in remote_heads:
                    if r in self.changelog.nodemap:
                        desc = self.changelog.heads(r, heads)
                        l = [h for h in heads if h in desc]
                        if not l:
                            newheads.append(r)
                    else:
                        newheads.append(r)
                if len(newheads) > len(remote_heads):
                    warn = 1

            if warn:
                self.ui.warn(_("abort: push creates new remote heads!\n"))
                self.ui.status(_("(did you forget to merge?"
                                 " use push -f to force)\n"))
                return None, 0
        elif inc:
            self.ui.warn(_("note: unsynced remote changes!\n"))

        if revs is None:
            cg = self.changegroup(update, 'push')
        else:
            cg = self.changegroupsubset(update, revs, 'push')
        return cg, remote_heads

    def push_addchangegroup(self, remote, force, revs):
        lock = remote.lock()
        try:
            ret = self.prepush(remote, force, revs)
            if ret[0] is not None:
                cg, remote_heads = ret
                return remote.addchangegroup(cg, 'push', self.url())
            return ret[1]
        finally:
            del lock

    def push_unbundle(self, remote, force, revs):
        # local repo finds heads on server, finds out what revs it
        # must push. once revs transferred, if server finds it has
        # different heads (someone else won commit/push race), server
        # aborts.

        ret = self.prepush(remote, force, revs)
        if ret[0] is not None:
            cg, remote_heads = ret
            if force: remote_heads = ['force']
            return remote.unbundle(cg, remote_heads, 'push')
        return ret[1]

    def changegroupinfo(self, nodes, source):
        if self.ui.verbose or source == 'bundle':
            self.ui.status(_("%d changesets found\n") % len(nodes))
        if self.ui.debugflag:
            self.ui.debug(_("List of changesets:\n"))
            for node in nodes:
                self.ui.debug("%s\n" % hex(node))

    def changegroupsubset(self, bases, heads, source, extranodes=None):
        """This function generates a changegroup consisting of all the nodes
        that are descendants of any of the bases, and ancestors of any of
        the heads.

        It is fairly complex as determining which filenodes and which
        manifest nodes need to be included for the changeset to be complete
        is non-trivial.

        Another wrinkle is doing the reverse, figuring out which changeset in
        the changegroup a particular filenode or manifestnode belongs to.

        The caller can specify some nodes that must be included in the
        changegroup using the extranodes argument.  It should be a dict
        where the keys are the filenames (or 1 for the manifest), and the
        values are lists of (node, linknode) tuples, where node is a wanted
        node and linknode is the changelog node that should be transmitted as
        the linkrev.
        """

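        # An illustrative extranodes value (the node variables are assumed):
        # keys are filenames, or the integer 1 for the manifest; values map
        # wanted nodes to the changelog node to transmit as their linkrev.
        #
        #   extranodes = {
        #       'foo/bar.txt': [(filenode, linknode)],
        #       1: [(manifestnode, linknode)],
        #   }
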
        self.hook('preoutgoing', throw=True, source=source)

        # Set up some initial variables
        # Make it easy to refer to self.changelog
        cl = self.changelog
        # msng is short for missing - compute the list of changesets in this
        # changegroup.
        msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
        self.changegroupinfo(msng_cl_lst, source)
        # Some bases may turn out to be superfluous, and some heads may be
        # too.  nodesbetween will return the minimal set of bases and heads
        # necessary to re-create the changegroup.

        # Known heads are the list of heads that it is assumed the recipient
        # of this changegroup will know about.
        knownheads = {}
        # We assume that all parents of bases are known heads.
        for n in bases:
            for p in cl.parents(n):
                if p != nullid:
                    knownheads[p] = 1
        knownheads = knownheads.keys()
        if knownheads:
            # Now that we know what heads are known, we can compute which
            # changesets are known.  The recipient must know about all
            # changesets required to reach the known heads from the null
            # changeset.
            has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
            junk = None
            # Transform the list into an ersatz set.
            has_cl_set = dict.fromkeys(has_cl_set)
        else:
            # If there were no known heads, the recipient cannot be assumed to
            # know about any changesets.
            has_cl_set = {}

        # Make it easy to refer to self.manifest
        mnfst = self.manifest
        # We don't know which manifests are missing yet
        msng_mnfst_set = {}
        # Nor do we know which filenodes are missing.
        msng_filenode_set = {}

        junk = mnfst.index[len(mnfst) - 1] # Get around a bug in lazyindex
        junk = None

        # A changeset always belongs to itself, so the changenode lookup
        # function for a changenode is identity.
        def identity(x):
            return x

        # A function generating function.  Sets up an environment for the
        # inner function.
        def cmp_by_rev_func(revlog):
            # Compare two nodes by their revision number in the environment's
            # revision history.  Since the revision number both represents the
            # most efficient order to read the nodes in, and represents a
            # topological sorting of the nodes, this function is often useful.
            def cmp_by_rev(a, b):
                return cmp(revlog.rev(a), revlog.rev(b))
            return cmp_by_rev

        # If we determine that a particular file or manifest node must be a
        # node that the recipient of the changegroup will already have, we can
        # also assume the recipient will have all the parents.  This function
        # prunes them from the set of missing nodes.
        def prune_parents(revlog, hasset, msngset):
            haslst = hasset.keys()
            haslst.sort(cmp_by_rev_func(revlog))
            for node in haslst:
                parentlst = [p for p in revlog.parents(node) if p != nullid]
                while parentlst:
                    n = parentlst.pop()
                    if n not in hasset:
                        hasset[n] = 1
                        p = [p for p in revlog.parents(n) if p != nullid]
                        parentlst.extend(p)
            for n in hasset:
                msngset.pop(n, None)

        # This is a function generating function used to set up an environment
        # for the inner function to execute in.
        def manifest_and_file_collector(changedfileset):
            # This is an information gathering function that gathers
            # information from each changeset node that goes out as part of
            # the changegroup.  The information gathered is a list of which
            # manifest nodes are potentially required (the recipient may
            # already have them) and total list of all files which were
            # changed in any changeset in the changegroup.
            #
            # We also remember the first changenode we saw any manifest
            # referenced by so we can later determine which changenode 'owns'
            # the manifest.
            def collect_manifests_and_files(clnode):
                c = cl.read(clnode)
                for f in c[3]:
                    # This is to make sure we only have one instance of each
                    # filename string for each filename.
                    changedfileset.setdefault(f, f)
                msng_mnfst_set.setdefault(c[0], clnode)
            return collect_manifests_and_files

        # Figure out which manifest nodes (of the ones we think might be part
        # of the changegroup) the recipient must know about and remove them
        # from the changegroup.
        def prune_manifests():
            has_mnfst_set = {}
            for n in msng_mnfst_set:
                # If a 'missing' manifest thinks it belongs to a changenode
                # the recipient is assumed to have, obviously the recipient
                # must have that manifest.
                linknode = cl.node(mnfst.linkrev(n))
                if linknode in has_cl_set:
                    has_mnfst_set[n] = 1
            prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)

        # Use the information collected in collect_manifests_and_files to say
        # which changenode any manifestnode belongs to.
        def lookup_manifest_link(mnfstnode):
            return msng_mnfst_set[mnfstnode]

        # A function generating function that sets up the initial environment
        # for the inner function.
        def filenode_collector(changedfiles):
            next_rev = [0]
            # This gathers information from each manifestnode included in the
            # changegroup about which filenodes the manifest node references
            # so we can include those in the changegroup too.
            #
            # It also remembers which changenode each filenode belongs to.  It
            # does this by assuming that a filenode belongs to the changenode
            # the first manifest that references it belongs to.
            def collect_msng_filenodes(mnfstnode):
                r = mnfst.rev(mnfstnode)
                if r == next_rev[0]:
                    # If the last rev we looked at was the one just previous,
                    # we only need to see a diff.
                    deltamf = mnfst.readdelta(mnfstnode)
                    # For each line in the delta
                    for f, fnode in deltamf.items():
                        f = changedfiles.get(f, None)
                        # And if the file is in the list of files we care
                        # about.
                        if f is not None:
                            # Get the changenode this manifest belongs to
                            clnode = msng_mnfst_set[mnfstnode]
                            # Create the set of filenodes for the file if
                            # there isn't one already.
                            ndset = msng_filenode_set.setdefault(f, {})
                            # And set the filenode's changelog node to the
                            # manifest's if it hasn't been set already.
                            ndset.setdefault(fnode, clnode)
                else:
                    # Otherwise we need a full manifest.
                    m = mnfst.read(mnfstnode)
                    # For every file we care about.
                    for f in changedfiles:
                        fnode = m.get(f, None)
                        # If it's in the manifest
                        if fnode is not None:
                            # See comments above.
                            clnode = msng_mnfst_set[mnfstnode]
                            ndset = msng_filenode_set.setdefault(f, {})
                            ndset.setdefault(fnode, clnode)
                # Remember the revision we hope to see next.
                next_rev[0] = r + 1
            return collect_msng_filenodes

        # We have a list of filenodes we think we need for a file; let's remove
        # all those we know the recipient must have.
        def prune_filenodes(f, filerevlog):
            msngset = msng_filenode_set[f]
            hasset = {}
            # If a 'missing' filenode thinks it belongs to a changenode we
            # assume the recipient must have, then the recipient must have
            # that filenode.
            for n in msngset:
                clnode = cl.node(filerevlog.linkrev(n))
                if clnode in has_cl_set:
                    hasset[n] = 1
            prune_parents(filerevlog, hasset, msngset)

        # A function generator function that sets up a context for the
        # inner function.
        def lookup_filenode_link_func(fname):
            msngset = msng_filenode_set[fname]
            # Lookup the changenode the filenode belongs to.
            def lookup_filenode_link(fnode):
                return msngset[fnode]
            return lookup_filenode_link

        # Add the nodes that were explicitly requested.
        def add_extra_nodes(name, nodes):
            if not extranodes or name not in extranodes:
                return

            for node, linknode in extranodes[name]:
                if node not in nodes:
                    nodes[node] = linknode

1776 # Now that we have all theses utility functions to help out and
1778 # Now that we have all theses utility functions to help out and
1777 # logically divide up the task, generate the group.
1779 # logically divide up the task, generate the group.
1778 def gengroup():
1780 def gengroup():
1779 # The set of changed files starts empty.
1781 # The set of changed files starts empty.
1780 changedfiles = {}
1782 changedfiles = {}
1781 # Create a changenode group generator that will call our functions
1783 # Create a changenode group generator that will call our functions
            # back to lookup the owning changenode and collect information.
            group = cl.group(msng_cl_lst, identity,
                             manifest_and_file_collector(changedfiles))
            for chnk in group:
                yield chnk

            # The list of manifests has been collected by the generator
            # calling our functions back.
            prune_manifests()
            add_extra_nodes(1, msng_mnfst_set)
            msng_mnfst_lst = msng_mnfst_set.keys()
            # Sort the manifestnodes by revision number.
            msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
                                filenode_collector(changedfiles))
            for chnk in group:
                yield chnk

            # These are no longer needed, dereference and toss the memory for
            # them.
            msng_mnfst_lst = None
            msng_mnfst_set.clear()

            if extranodes:
                for fname in extranodes:
                    if isinstance(fname, int):
                        continue
                    msng_filenode_set.setdefault(fname, {})
                    changedfiles[fname] = 1
            # Go through all our files in order sorted by name.
            for fname in util.sort(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                # Toss out the filenodes that the recipient isn't really
                # missing.
                if fname in msng_filenode_set:
                    prune_filenodes(fname, filerevlog)
                    add_extra_nodes(fname, msng_filenode_set[fname])
                    msng_filenode_lst = msng_filenode_set[fname].keys()
                else:
                    msng_filenode_lst = []
                # If any filenodes are left, generate the group for them,
                # otherwise don't bother.
                if len(msng_filenode_lst) > 0:
                    yield changegroup.chunkheader(len(fname))
                    yield fname
                    # Sort the filenodes by their revision number.
                    msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
                    # Create a group generator and only pass in a changenode
                    # lookup function as we need to collect no information
                    # from filenodes.
                    group = filerevlog.group(msng_filenode_lst,
                                             lookup_filenode_link_func(fname))
                    for chnk in group:
                        yield chnk
                if fname in msng_filenode_set:
                    # Don't need this anymore, toss it to free memory.
                    del msng_filenode_set[fname]
            # Signal that no more groups are left.
            yield changegroup.closechunk()

            if msng_cl_lst:
                self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)

        return util.chunkbuffer(gengroup())

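    # Editorial aside (not part of the original source): the generator above
    # emits the raw changegroup wire format, in which every chunk is framed
    # by a 4-byte big-endian length that includes the header itself;
    # changegroup.chunkheader(n) writes that prefix and
    # changegroup.closechunk() writes a zero length to terminate a group.
    # A minimal sketch of a reader for this framing, assuming `stream` is
    # any object with a read() method:
    #
    #     import struct
    #
    #     def readchunks(stream):
    #         while True:
    #             header = stream.read(4)
    #             if len(header) < 4:
    #                 break
    #             l = struct.unpack(">l", header)[0]
    #             if l <= 4:
    #                 yield ''              # delimiter from closechunk()
    #             else:
    #                 yield stream.read(l - 4)
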
    def changegroup(self, basenodes, source):
        """Generate a changegroup of all nodes that we have that a recipient
        doesn't.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them."""

        self.hook('preoutgoing', throw=True, source=source)

        cl = self.changelog
        nodes = cl.nodesbetween(basenodes, None)[0]
        revset = dict.fromkeys([cl.rev(n) for n in nodes])
        self.changegroupinfo(nodes, source)

        def identity(x):
            return x

        def gennodelst(log):
            for r in log:
                n = log.node(r)
                if log.linkrev(n) in revset:
                    yield n

        def changed_file_collector(changedfileset):
            def collect_changed_files(clnode):
                c = cl.read(clnode)
                for fname in c[3]:
                    changedfileset[fname] = 1
            return collect_changed_files

        def lookuprevlink_func(revlog):
            def lookuprevlink(n):
                return cl.node(revlog.linkrev(n))
            return lookuprevlink

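        # Editorial aside (not part of the original source): revlog.group()
        # calls the supplied lookup function once for each node it packs,
        # so the closure factories above simply bind the revlog whose
        # linkrevs must be mapped back to changelog nodes, e.g.:
        #
        #     lookup = lookuprevlink_func(self.manifest)
        #     lookup(mnode)   # -> changeset node that introduced mnode
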
        def gengroup():
            # construct a list of all changed files
            changedfiles = {}

            for chnk in cl.group(nodes, identity,
                                 changed_file_collector(changedfiles)):
                yield chnk

            mnfst = self.manifest
            nodeiter = gennodelst(mnfst)
            for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
                yield chnk

            for fname in util.sort(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                nodeiter = gennodelst(filerevlog)
                nodeiter = list(nodeiter)
                if nodeiter:
                    yield changegroup.chunkheader(len(fname))
                    yield fname
                    lookup = lookuprevlink_func(filerevlog)
                    for chnk in filerevlog.group(nodeiter, lookup):
                        yield chnk

            yield changegroup.closechunk()

        if nodes:
            self.hook('outgoing', node=hex(nodes[0]), source=source)

        return util.chunkbuffer(gengroup())

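    # Editorial usage sketch (not part of the original source): a typical
    # pairing of the producer side above with the consumer below, assuming
    # `src` and `dst` are localrepository instances; real push/pull code
    # derives the base nodes from the discovery protocol rather than from
    # dst.heads():
    #
    #     cg = src.changegroup(dst.heads(), 'push')
    #     dst.addchangegroup(cg, 'push', url='file:///path/to/dst')
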
    def addchangegroup(self, source, srctype, url, emptyok=False):
        """add changegroup to repo.

        return values:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - fewer heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
        def csmap(x):
            self.ui.debug(_("add changeset %s\n") % short(x))
            return len(cl)

        def revmap(x):
            return cl.rev(x)

        if not source:
            return 0

        self.hook('prechangegroup', throw=True, source=srctype, url=url)

        changesets = files = revisions = 0

        # write changelog data to temp files so concurrent readers will not
        # see an inconsistent view
        cl = self.changelog
        cl.delayupdate()
        oldheads = len(cl.heads())

        tr = self.transaction()
        try:
            trp = weakref.proxy(tr)
            # pull off the changeset group
            self.ui.status(_("adding changesets\n"))
            cor = len(cl) - 1
            chunkiter = changegroup.chunkiter(source)
            if cl.addgroup(chunkiter, csmap, trp) is None and not emptyok:
                raise util.Abort(_("received changelog group is empty"))
            cnr = len(cl) - 1
            changesets = cnr - cor

            # pull off the manifest group
            self.ui.status(_("adding manifests\n"))
            chunkiter = changegroup.chunkiter(source)
            # no need to check for an empty manifest group here:
            # if the result of the merge of 1 and 2 is the same in 3 and 4,
            # no new manifest will be created and the manifest group will
            # be empty during the pull
            self.manifest.addgroup(chunkiter, revmap, trp)

            # process the files
            self.ui.status(_("adding file changes\n"))
            while 1:
                f = changegroup.getchunk(source)
                if not f:
                    break
                self.ui.debug(_("adding %s revisions\n") % f)
                fl = self.file(f)
                o = len(fl)
                chunkiter = changegroup.chunkiter(source)
                if fl.addgroup(chunkiter, revmap, trp) is None:
                    raise util.Abort(_("received file revlog group is empty"))
                revisions += len(fl) - o
                files += 1

            # make changelog see real files again
            cl.finalize(trp)

            newheads = len(self.changelog.heads())
            heads = ""
            if oldheads and newheads != oldheads:
                heads = _(" (%+d heads)") % (newheads - oldheads)

            self.ui.status(_("added %d changesets"
                             " with %d changes to %d files%s\n")
                           % (changesets, revisions, files, heads))

            if changesets > 0:
                self.hook('pretxnchangegroup', throw=True,
                          node=hex(self.changelog.node(cor+1)), source=srctype,
                          url=url)

            tr.close()
        finally:
            del tr

        if changesets > 0:
            # forcefully update the on-disk branch cache
            self.ui.debug(_("updating the branch cache\n"))
            self.branchtags()
            self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
                      source=srctype, url=url)

            for i in xrange(cor + 1, cnr + 1):
                self.hook("incoming", node=hex(self.changelog.node(i)),
                          source=srctype, url=url)

        # never return 0 here:
        if newheads < oldheads:
            return newheads - oldheads - 1
        else:
            return newheads - oldheads + 1


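    # Editorial sketch (not part of the original source): interpreting the
    # return value of addchangegroup() in a caller, per the docstring's
    # contract:
    #
    #     ret = repo.addchangegroup(cg, 'pull', url)
    #     if ret == 0:
    #         pass                    # nothing was added
    #     elif ret == 1:
    #         pass                    # changes added, head count unchanged
    #     elif ret > 1:
    #         added = ret - 1         # that many heads were added
    #     else:
    #         removed = -1 - ret      # that many heads were removed
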
    def stream_in(self, remote):
        fp = remote.stream_out()
        l = fp.readline()
        try:
            resp = int(l)
        except ValueError:
            raise util.UnexpectedOutput(
                _('Unexpected response from remote server:'), l)
        if resp == 1:
            raise util.Abort(_('operation forbidden by server'))
        elif resp == 2:
            raise util.Abort(_('locking the remote repository failed'))
        elif resp != 0:
            raise util.Abort(_('the server sent an unknown error code'))
        self.ui.status(_('streaming all changes\n'))
        l = fp.readline()
        try:
            total_files, total_bytes = map(int, l.split(' ', 1))
        except (ValueError, TypeError):
            raise util.UnexpectedOutput(
                _('Unexpected response from remote server:'), l)
        self.ui.status(_('%d files to transfer, %s of data\n') %
                       (total_files, util.bytecount(total_bytes)))
        start = time.time()
        for i in xrange(total_files):
            # XXX doesn't support '\n' or '\r' in filenames
            l = fp.readline()
            try:
                name, size = l.split('\0', 1)
                size = int(size)
            except (ValueError, TypeError):
                raise util.UnexpectedOutput(
                    _('Unexpected response from remote server:'), l)
            self.ui.debug(_('adding %s (%s)\n') % (name, util.bytecount(size)))
            ofp = self.sopener(name, 'w')
            for chunk in util.filechunkiter(fp, limit=size):
                ofp.write(chunk)
            ofp.close()
        elapsed = time.time() - start
        if elapsed <= 0:
            elapsed = 0.001
        self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                       (util.bytecount(total_bytes), elapsed,
                        util.bytecount(total_bytes / elapsed)))
        self.invalidate()
        return len(self.heads()) + 1

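    # Editorial aside (not part of the original source): the stream format
    # consumed above, as implied by the parser, is:
    #
    #     <status>\n                  "0\n" on success, "1"/"2" for errors
    #     <total files> <total bytes>\n
    #     <name>\0<size>\n            once per store file, followed by
    #     <size raw bytes>            the file contents, unframed
    #
    # so a minimal producer sketch would emit, for each entry:
    #
    #     out.write('%s\0%d\n' % (name, size))
    #     out.write(data)
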
    def clone(self, remote, heads=[], stream=False):
        '''clone remote repository.

        keyword arguments:
        heads: list of revs to clone (forces use of pull)
        stream: use streaming clone if possible'''

        # now, all clients that can request uncompressed clones can
        # read repo formats supported by all servers that can serve
        # them.

        # if revlog format changes, client will have to check version
        # and format flags on "stream" capability, and use
        # uncompressed only if compatible.

        if stream and not heads and remote.capable('stream'):
            return self.stream_in(remote)
        return self.pull(remote, heads)

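    # Editorial usage sketch (not part of the original source), assuming
    # `repo` and `remote` are already-open repository objects and
    # `somehead` is an illustrative node:
    #
    #     repo.clone(remote, stream=True)        # streams when the server
    #                                            # advertises 'stream'
    #     repo.clone(remote, heads=[somehead])   # explicit heads force pull
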
# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for src, dest in renamefiles:
            util.rename(src, dest)
    return a

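# Editorial sketch (not part of the original source) of the intent behind
# aftertrans(): the transaction keeps only the returned closure, which
# holds plain string pairs instead of repository objects, so no reference
# cycle keeps the repo alive.  The file names below are illustrative.
def _aftertrans_example():
    onclose = aftertrans([('journal', 'undo'),
                          ('journal.dirstate', 'undo.dirstate')])
    # the transaction invokes the closure when it commits:
    onclose()  # renames journal -> undo, journal.dirstate -> undo.dirstate
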
def instance(ui, path, create):
    return localrepository(ui, util.drop_scheme('file', path), create)

def islocal(path):
    return True