##// END OF EJS Templates
automatically update the branch cache when tip changes
Alexis S. L. Carvalho -
r6121:7336aeff default
parent child Browse files
Show More
@@ -1,2105 +1,2117
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms
5 # This software may be used and distributed according to the terms
6 # of the GNU General Public License, incorporated herein by reference.
6 # of the GNU General Public License, incorporated herein by reference.
7
7
8 from node import *
8 from node import *
9 from i18n import _
9 from i18n import _
10 import repo, changegroup
10 import repo, changegroup
11 import changelog, dirstate, filelog, manifest, context, weakref
11 import changelog, dirstate, filelog, manifest, context, weakref
12 import re, lock, transaction, tempfile, stat, errno, ui
12 import re, lock, transaction, tempfile, stat, errno, ui
13 import os, revlog, time, util, extensions, hook, inspect
13 import os, revlog, time, util, extensions, hook, inspect
14
14
15 class localrepository(repo.repository):
15 class localrepository(repo.repository):
16 capabilities = util.set(('lookup', 'changegroupsubset'))
16 capabilities = util.set(('lookup', 'changegroupsubset'))
17 supported = ('revlogv1', 'store')
17 supported = ('revlogv1', 'store')
18
18
    def __init__(self, parentui, path=None, create=0):
        """Open (or, with create=1, initialize) the repository rooted at path.

        parentui - the ui object the repository-local ui is derived from
        path     - working directory root; the metadata lives in path/.hg
        create   - when true, create a new repository; raises RepoError if
                   one already exists (and vice versa when false and none
                   exists)
        """
        repo.repository.__init__(self)
        self.root = os.path.realpath(path)
        self.path = os.path.join(self.root, ".hg")
        # keep the path as given by the caller (before realpath)
        self.origroot = path
        self.opener = util.opener(self.path)
        self.wopener = util.opener(self.root)

        if not os.path.isdir(self.path):
            if create:
                if not os.path.exists(path):
                    os.mkdir(path)
                os.mkdir(self.path)
                requirements = ["revlogv1"]
                if parentui.configbool('format', 'usestore', True):
                    os.mkdir(os.path.join(self.path, "store"))
                    requirements.append("store")
                # create an invalid changelog
                self.opener("00changelog.i", "a").write(
                    '\0\0\0\2' # represents revlogv2
                    ' dummy changelog to prevent using the old repo layout'
                )
                reqfile = self.opener("requires", "w")
                for r in requirements:
                    reqfile.write("%s\n" % r)
                reqfile.close()
            else:
                raise repo.RepoError(_("repository %s not found") % path)
        elif create:
            raise repo.RepoError(_("repository %s already exists") % path)
        else:
            # find requirements
            try:
                requirements = self.opener("requires").read().splitlines()
            except IOError, inst:
                # a missing requires file means an old-style repo; any
                # other error is fatal
                if inst.errno != errno.ENOENT:
                    raise
                requirements = []
            # check them
            for r in requirements:
                if r not in self.supported:
                    raise repo.RepoError(_("requirement '%s' not supported") % r)

        # setup store: with the "store" requirement, revlogs live under
        # .hg/store with encoded filenames; otherwise directly under .hg
        if "store" in requirements:
            self.encodefn = util.encodefilename
            self.decodefn = util.decodefilename
            self.spath = os.path.join(self.path, "store")
        else:
            self.encodefn = lambda x: x
            self.decodefn = lambda x: x
            self.spath = self.path

        try:
            # files in .hg/ will be created using this mode
            mode = os.stat(self.spath).st_mode
            # avoid some useless chmods
            if (0777 & ~util._umask) == (0777 & mode):
                mode = None
        except OSError:
            mode = None

        self._createmode = mode
        self.opener.createmode = mode
        sopener = util.opener(self.spath)
        sopener.createmode = mode
        self.sopener = util.encodedopener(sopener, self.encodefn)

        self.ui = ui.ui(parentui=parentui)
        try:
            self.ui.readconfig(self.join("hgrc"), self.root)
            extensions.loadall(self.ui)
        except IOError:
            # no per-repo hgrc is fine
            pass

        # lazily-populated caches; None / empty means "not computed yet"
        self.tagscache = None
        self._tagstypecache = None
        self.branchcache = None
        self._ubranchcache = None # UTF-8 version of branchcache
        self._branchcachetip = None  # changelog tip branchcache was built for
        self.nodetagscache = None
        self.filterpats = {}
        self._datafilters = {}
        # weak references so transactions/locks can be garbage collected
        self._transref = self._lockref = self._wlockref = None
102
103
103 def __getattr__(self, name):
104 def __getattr__(self, name):
104 if name == 'changelog':
105 if name == 'changelog':
105 self.changelog = changelog.changelog(self.sopener)
106 self.changelog = changelog.changelog(self.sopener)
106 self.sopener.defversion = self.changelog.version
107 self.sopener.defversion = self.changelog.version
107 return self.changelog
108 return self.changelog
108 if name == 'manifest':
109 if name == 'manifest':
109 self.changelog
110 self.changelog
110 self.manifest = manifest.manifest(self.sopener)
111 self.manifest = manifest.manifest(self.sopener)
111 return self.manifest
112 return self.manifest
112 if name == 'dirstate':
113 if name == 'dirstate':
113 self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
114 self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
114 return self.dirstate
115 return self.dirstate
115 else:
116 else:
116 raise AttributeError, name
117 raise AttributeError, name
117
118
118 def url(self):
119 def url(self):
119 return 'file:' + self.root
120 return 'file:' + self.root
120
121
    def hook(self, name, throw=False, **args):
        """Run the named hook through the hook module.

        throw - when true, a failing hook raises instead of returning an
        error status.  **args are forwarded to the hook environment.
        """
        return hook.hook(self.ui, self, name, throw, **args)
123
124
    # characters forbidden in tag names: ':' (field separator in tag files)
    # and line terminators
    tag_disallowed = ':\r\n'
125
126
    def _tag(self, name, node, message, local, user, date, parent=None,
             extra={}):
        """Low-level tagging helper shared by tag() and extensions.

        Writes the tag to .hg/localtags (local=True) or to .hgtags and
        commits the change.  parent, when given, is the changeset to base
        the .hgtags content and the commit on instead of the dirstate
        parent.  Returns the commit node for global tags, None for local
        ones.  Runs the 'pretag' and 'tag' hooks.
        """
        # when no explicit parent is given we work against the dirstate
        use_dirstate = parent is None

        for c in self.tag_disallowed:
            if c in name:
                raise util.Abort(_('%r cannot be used in a tag name') % c)

        self.hook('pretag', throw=True, node=hex(node), tag=name, local=local)

        def writetag(fp, name, munge, prevtags):
            # append one "<hex-node> <name>" line, making sure the
            # previous content ends with a newline first
            fp.seek(0, 2)
            if prevtags and prevtags[-1] != '\n':
                fp.write('\n')
            fp.write('%s %s\n' % (hex(node), munge and munge(name) or name))
            fp.close()

        prevtags = ''
        if local:
            try:
                fp = self.opener('localtags', 'r+')
            except IOError, err:
                fp = self.opener('localtags', 'a')
            else:
                prevtags = fp.read()

            # local tags are stored in the current charset
            writetag(fp, name, None, prevtags)
            self.hook('tag', node=hex(node), tag=name, local=local)
            return

        if use_dirstate:
            try:
                fp = self.wfile('.hgtags', 'rb+')
            except IOError, err:
                fp = self.wfile('.hgtags', 'ab')
            else:
                prevtags = fp.read()
        else:
            # base .hgtags on the requested parent revision
            try:
                prevtags = self.filectx('.hgtags', parent).data()
            except revlog.LookupError:
                # parent revision has no .hgtags yet
                pass
            fp = self.wfile('.hgtags', 'wb')
            if prevtags:
                fp.write(prevtags)

        # committed tags are stored in UTF-8
        writetag(fp, name, util.fromlocal, prevtags)

        if use_dirstate and '.hgtags' not in self.dirstate:
            self.add(['.hgtags'])

        tagnode = self.commit(['.hgtags'], message, user, date, p1=parent,
                              extra=extra)

        self.hook('tag', node=hex(node), tag=name, local=local)

        return tagnode
185
186
    def tag(self, name, node, message, local, user, date):
        '''tag a revision with a symbolic name.

        if local is True, the tag is stored in a per-repository file.
        otherwise, it is stored in the .hgtags file, and a new
        changeset is committed with the change.

        keyword arguments:

        local: whether to store tag in non-version-controlled file
        (default False)

        message: commit message to use if committing

        user: name of user to use if committing

        date: date tuple to use if committing'''

        # refuse to tag while .hgtags itself has uncommitted changes in
        # any working-copy status list: the implicit commit below would
        # sweep those changes in as well
        for x in self.status()[:5]:
            if '.hgtags' in x:
                raise util.Abort(_('working copy of .hgtags is changed '
                                   '(please commit .hgtags manually)'))


        self._tag(name, node, message, local, user, date)
211
212
    def tags(self):
        '''return a mapping of tag to node'''
        if self.tagscache:
            return self.tagscache

        globaltags = {}
        tagtypes = {}

        def readtags(lines, fn, tagtype):
            # parse one tag file and merge it into globaltags/tagtypes;
            # fn is the source (filectx or name) used in warnings
            filetags = {}
            count = 0

            def warn(msg):
                self.ui.warn(_("%s, line %s: %s\n") % (fn, count, msg))

            for l in lines:
                count += 1
                if not l:
                    continue
                s = l.split(" ", 1)
                if len(s) != 2:
                    warn(_("cannot parse entry"))
                    continue
                node, key = s
                key = util.tolocal(key.strip()) # stored in UTF-8
                try:
                    bin_n = bin(node)
                except TypeError:
                    warn(_("node '%s' is not well formed") % node)
                    continue
                if bin_n not in self.changelog.nodemap:
                    warn(_("tag '%s' refers to unknown node") % key)
                    continue

                # filetags maps tag name -> (node, history of older nodes)
                h = []
                if key in filetags:
                    n, h = filetags[key]
                    h.append(n)
                filetags[key] = (bin_n, h)

            for k, nh in filetags.items():
                if k not in globaltags:
                    globaltags[k] = nh
                    tagtypes[k] = tagtype
                    continue

                # we prefer the global tag if:
                #  it supersedes us OR
                #  mutual supersedes and it has a higher rank
                # otherwise we win because we're tip-most
                an, ah = nh
                bn, bh = globaltags[k]
                if (bn != an and an in bh and
                    (bn not in ah or len(bh) > len(ah))):
                    an = bn
                ah.extend([n for n in bh if n not in ah])
                globaltags[k] = an, ah
                tagtypes[k] = tagtype

        # read the tags file from each head, ending with the tip
        f = None
        for rev, node, fnode in self._hgtagsnodes():
            # reuse the previous filectx where possible to share the filelog
            f = (f and f.filectx(fnode) or
                 self.filectx('.hgtags', fileid=fnode))
            readtags(f.data().splitlines(), f, "global")

        try:
            data = util.fromlocal(self.opener("localtags").read())
            # localtags are stored in the local charset
            # while the internal tag table is stored in UTF-8
            readtags(data.splitlines(), "localtags", "local")
        except IOError:
            # no localtags file is fine
            pass

        self.tagscache = {}
        self._tagstypecache = {}
        for k,nh in globaltags.items():
            n = nh[0]
            # a tag deliberately mapped to nullid is a deleted tag
            if n != nullid:
                self.tagscache[k] = n
            self._tagstypecache[k] = tagtypes[k]
        # 'tip' is implicit and always present
        self.tagscache['tip'] = self.changelog.tip()

        return self.tagscache
296
297
    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        # calling tags() populates _tagstypecache as a side effect
        self.tags()

        return self._tagstypecache.get(tagname)
309
310
    def _hgtagsnodes(self):
        """Return a list of (rev, node, fnode) for each head that has a
        .hgtags file, tip-most last.

        When several heads share the same .hgtags file node, only the
        last (tip-most) occurrence is kept, so each distinct tag file is
        read once by tags().
        """
        heads = self.heads()
        heads.reverse()
        last = {}
        ret = []
        for node in heads:
            c = self.changectx(node)
            rev = c.rev()
            try:
                fnode = c.filenode('.hgtags')
            except revlog.LookupError:
                # this head has no .hgtags
                continue
            ret.append((rev, node, fnode))
            if fnode in last:
                # drop the earlier duplicate of the same tag file
                ret[last[fnode]] = None
            last[fnode] = len(ret) - 1
        return [item for item in ret if item]
327
328
328 def tagslist(self):
329 def tagslist(self):
329 '''return a list of tags ordered by revision'''
330 '''return a list of tags ordered by revision'''
330 l = []
331 l = []
331 for t, n in self.tags().items():
332 for t, n in self.tags().items():
332 try:
333 try:
333 r = self.changelog.rev(n)
334 r = self.changelog.rev(n)
334 except:
335 except:
335 r = -2 # sort to the beginning of the list if unknown
336 r = -2 # sort to the beginning of the list if unknown
336 l.append((r, t, n))
337 l.append((r, t, n))
337 l.sort()
338 l.sort()
338 return [(t, n) for r, t, n in l]
339 return [(t, n) for r, t, n in l]
339
340
340 def nodetags(self, node):
341 def nodetags(self, node):
341 '''return the tags associated with a node'''
342 '''return the tags associated with a node'''
342 if not self.nodetagscache:
343 if not self.nodetagscache:
343 self.nodetagscache = {}
344 self.nodetagscache = {}
344 for t, n in self.tags().items():
345 for t, n in self.tags().items():
345 self.nodetagscache.setdefault(n, []).append(t)
346 self.nodetagscache.setdefault(n, []).append(t)
346 return self.nodetagscache.get(node, [])
347 return self.nodetagscache.get(node, [])
347
348
    def _branchtags(self, partial, lrev):
        """Bring the partial branch map (valid up to changelog revision
        lrev) up to date with the current tip and persist it to disk.

        Returns the (mutated) partial mapping.
        """
        tiprev = self.changelog.count() - 1
        if lrev != tiprev:
            # scan only the revisions added since the cache was built
            self._updatebranchcache(partial, lrev+1, tiprev+1)
            self._writebranchcache(partial, self.changelog.tip(), tiprev)

        return partial
355
356
    def branchtags(self):
        """Return a dict of branch name (local charset) -> tip-most node.

        The result is cached in self.branchcache.  When the changelog tip
        has moved since the previous call, the cache is updated
        incrementally from the old tip where possible, otherwise rebuilt
        from the on-disk branch.cache file.
        """
        tip = self.changelog.tip()
        if self.branchcache is not None and self._branchcachetip == tip:
            return self.branchcache

        oldtip = self._branchcachetip
        self._branchcachetip = tip
        if self.branchcache is None:
            self.branchcache = {} # avoid recursion in changectx
        else:
            self.branchcache.clear() # keep using the same dict
        if oldtip is None or oldtip not in self.changelog.nodemap:
            # first call, or the old tip was stripped: start from the
            # on-disk cache
            partial, last, lrev = self._readbranchcache()
        else:
            # incremental update from the previously computed state
            lrev = self.changelog.rev(oldtip)
            partial = self._ubranchcache

        self._branchtags(partial, lrev)

        # the branch cache is stored on disk as UTF-8, but in the local
        # charset internally
        for k, v in partial.items():
            self.branchcache[util.tolocal(k)] = v
        self._ubranchcache = partial
        return self.branchcache
370
382
    def _readbranchcache(self):
        """Read .hg/branch.cache from disk.

        Returns (partial, last, lrev): the branch -> node mapping plus the
        tip node and revision the mapping was valid for.  Any problem
        (missing file, corrupt content, stale tip) yields the empty state
        ({}, nullid, nullrev) so the cache is simply rebuilt.
        """
        partial = {}
        try:
            f = self.opener("branch.cache")
            lines = f.read().split('\n')
            f.close()
        except (IOError, OSError):
            return {}, nullid, nullrev

        try:
            # first line records the tip the cache was written against
            last, lrev = lines.pop(0).split(" ", 1)
            last, lrev = bin(last), int(lrev)
            if not (lrev < self.changelog.count() and
                    self.changelog.node(lrev) == last): # sanity check
                # invalidate the cache
                raise ValueError('invalidating branch cache (tip differs)')
            for l in lines:
                if not l: continue
                node, label = l.split(" ", 1)
                partial[label.strip()] = bin(node)
        except (KeyboardInterrupt, util.SignalInterrupt):
            # never swallow user interrupts
            raise
        except Exception, inst:
            # any parse error just discards the cache
            if self.ui.debugflag:
                self.ui.warn(str(inst), '\n')
            partial, last, lrev = {}, nullid, nullrev
        return partial, last, lrev
398
410
    def _writebranchcache(self, branches, tip, tiprev):
        """Atomically write the branch -> node map to .hg/branch.cache.

        The first line is "<tip-hex> <tiprev>", used as a validity check
        by _readbranchcache.  Write errors are deliberately ignored: the
        cache is only an optimization.
        """
        try:
            f = self.opener("branch.cache", "w", atomictemp=True)
            f.write("%s %s\n" % (hex(tip), tiprev))
            for label, node in branches.iteritems():
                f.write("%s %s\n" % (hex(node), label))
            f.rename()
        except (IOError, OSError):
            pass
408
420
409 def _updatebranchcache(self, partial, start, end):
421 def _updatebranchcache(self, partial, start, end):
410 for r in xrange(start, end):
422 for r in xrange(start, end):
411 c = self.changectx(r)
423 c = self.changectx(r)
412 b = c.branch()
424 b = c.branch()
413 partial[b] = c.node()
425 partial[b] = c.node()
414
426
    def lookup(self, key):
        """Resolve key to a changelog node.

        Resolution order: '.' (first dirstate parent), 'null', exact
        rev/node match, tag name, branch name, unambiguous node prefix.
        Raises RepoError if nothing matches.
        """
        if key == '.':
            key, second = self.dirstate.parents()
            if key == nullid:
                raise repo.RepoError(_("no revision checked out"))
            if second != nullid:
                self.ui.warn(_("warning: working directory has two parents, "
                               "tag '.' uses the first\n"))
        elif key == 'null':
            return nullid
        n = self.changelog._match(key)
        if n:
            return n
        if key in self.tags():
            return self.tags()[key]
        if key in self.branchtags():
            return self.branchtags()[key]
        n = self.changelog._partialmatch(key)
        if n:
            return n
        try:
            # a 20-byte string is probably a binary node: show it as hex
            # in the error message
            if len(key) == 20:
                key = hex(key)
        except:
            pass
        raise repo.RepoError(_("unknown revision '%s'") % key)
441
453
    def dev(self):
        """Return the device number (st_dev) of the .hg directory."""
        return os.lstat(self.path).st_dev
444
456
    def local(self):
        # this repository type is accessed via the local filesystem
        return True
447
459
    def join(self, f):
        """Return the path of f inside the .hg directory."""
        return os.path.join(self.path, f)
450
462
451 def sjoin(self, f):
463 def sjoin(self, f):
452 f = self.encodefn(f)
464 f = self.encodefn(f)
453 return os.path.join(self.spath, f)
465 return os.path.join(self.spath, f)
454
466
    def wjoin(self, f):
        """Return the path of f inside the working directory."""
        return os.path.join(self.root, f)
457
469
458 def file(self, f):
470 def file(self, f):
459 if f[0] == '/':
471 if f[0] == '/':
460 f = f[1:]
472 f = f[1:]
461 return filelog.filelog(self.sopener, f)
473 return filelog.filelog(self.sopener, f)
462
474
    def changectx(self, changeid=None):
        """Return a changectx for changeid; handling of None is delegated
        to context.changectx."""
        return context.changectx(self, changeid)
465
477
    def workingctx(self):
        """Return a context object for the working directory."""
        return context.workingctx(self)
468
480
469 def parents(self, changeid=None):
481 def parents(self, changeid=None):
470 '''
482 '''
471 get list of changectxs for parents of changeid or working directory
483 get list of changectxs for parents of changeid or working directory
472 '''
484 '''
473 if changeid is None:
485 if changeid is None:
474 pl = self.dirstate.parents()
486 pl = self.dirstate.parents()
475 else:
487 else:
476 n = self.changelog.lookup(changeid)
488 n = self.changelog.lookup(changeid)
477 pl = self.changelog.parents(n)
489 pl = self.changelog.parents(n)
478 if pl[1] == nullid:
490 if pl[1] == nullid:
479 return [self.changectx(pl[0])]
491 return [self.changectx(pl[0])]
480 return [self.changectx(pl[0]), self.changectx(pl[1])]
492 return [self.changectx(pl[0]), self.changectx(pl[1])]
481
493
    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)
486
498
    def getcwd(self):
        """Return the current working directory as seen by the dirstate."""
        return self.dirstate.getcwd()
489
501
    def pathto(self, f, cwd=None):
        """Return f rendered relative to cwd, via the dirstate."""
        return self.dirstate.pathto(f, cwd)
492
504
    def wfile(self, f, mode='r'):
        """Open file f from the working directory with the given mode."""
        return self.wopener(f, mode)
495
507
496 def _link(self, f):
508 def _link(self, f):
497 return os.path.islink(self.wjoin(f))
509 return os.path.islink(self.wjoin(f))
498
510
    def _filter(self, filter, filename, data):
        """Run data through the first matching filter of the given
        category ('encode' or 'decode') and return the result.

        The compiled (matcher, function, params) list for each category is
        built once from the ui config plus registered data filters and
        cached in self.filterpats.
        """
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                mf = util.matcher(self.root, "", [pat], [], [])[1]
                fn = None
                params = cmd
                # a command starting with a registered filter name uses
                # that in-process filter; the remainder are its params
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    # otherwise treat the command as an external pipe
                    fn = lambda s, c, **kwargs: util.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not inspect.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self.filterpats[filter] = l

        # only the first matching pattern is applied
        for mf, fn, cmd in self.filterpats[filter]:
            if mf(filename):
                self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data
527
539
    def adddatafilter(self, name, filter):
        """Register an in-process data filter callable under name, for use
        in encode/decode filter commands."""
        self._datafilters[name] = filter
530
542
531 def wread(self, filename):
543 def wread(self, filename):
532 if self._link(filename):
544 if self._link(filename):
533 data = os.readlink(self.wjoin(filename))
545 data = os.readlink(self.wjoin(filename))
534 else:
546 else:
535 data = self.wopener(filename, 'r').read()
547 data = self.wopener(filename, 'r').read()
536 return self._filter("encode", filename, data)
548 return self._filter("encode", filename, data)
537
549
    def wwrite(self, filename, data, flags):
        """Write data (after "decode" filtering) to working-directory file
        filename and apply flags (e.g. exec/symlink) to it."""
        data = self._filter("decode", filename, data)
        # remove the old file first so a previous symlink/mode does not
        # interfere; best-effort, the file may not exist
        try:
            os.unlink(self.wjoin(filename))
        except OSError:
            pass
        self.wopener(filename, 'w').write(data)
        util.set_flags(self.wjoin(filename), flags)
546
558
    def wwritedata(self, filename, data):
        """Return data as it would be written to the working directory
        (after "decode" filtering), without writing anything."""
        return self._filter("decode", filename, data)
549
561
    def transaction(self):
        """Start (or nest into) a repository transaction.

        Returns a transaction object; keeping only a weak reference here
        lets an abandoned transaction be aborted via garbage collection.
        Raises RepoError if an old journal exists (interrupted
        transaction needing 'hg recover').
        """
        # nest inside a still-live existing transaction if there is one
        if self._transref and self._transref():
            return self._transref().nest()

        # abort here if the journal already exists
        if os.path.exists(self.sjoin("journal")):
            raise repo.RepoError(_("journal already exists - run hg recover"))

        # save dirstate for rollback
        try:
            ds = self.opener("dirstate").read()
        except IOError:
            ds = ""
        self.opener("journal.dirstate", "w").write(ds)
        self.opener("journal.branch", "w").write(self.dirstate.branch())

        # on close, the journal files become the undo files for rollback
        renames = [(self.sjoin("journal"), self.sjoin("undo")),
                   (self.join("journal.dirstate"), self.join("undo.dirstate")),
                   (self.join("journal.branch"), self.join("undo.branch"))]
        tr = transaction.transaction(self.ui.warn, self.sopener,
                                     self.sjoin("journal"),
                                     aftertrans(renames),
                                     self._createmode)
        self._transref = weakref.ref(tr)
        return tr
575
587
576 def recover(self):
588 def recover(self):
577 l = self.lock()
589 l = self.lock()
578 try:
590 try:
579 if os.path.exists(self.sjoin("journal")):
591 if os.path.exists(self.sjoin("journal")):
580 self.ui.status(_("rolling back interrupted transaction\n"))
592 self.ui.status(_("rolling back interrupted transaction\n"))
581 transaction.rollback(self.sopener, self.sjoin("journal"))
593 transaction.rollback(self.sopener, self.sjoin("journal"))
582 self.invalidate()
594 self.invalidate()
583 return True
595 return True
584 else:
596 else:
585 self.ui.warn(_("no interrupted transaction available\n"))
597 self.ui.warn(_("no interrupted transaction available\n"))
586 return False
598 return False
587 finally:
599 finally:
588 del l
600 del l
589
601
    def rollback(self):
        """Undo the last committed transaction.

        Restores the store from the undo journal, puts back the saved
        dirstate and branch, and drops all caches.  Warns when no
        rollback information is available.
        """
        wlock = lock = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if os.path.exists(self.sjoin("undo")):
                self.ui.status(_("rolling back last transaction\n"))
                transaction.rollback(self.sopener, self.sjoin("undo"))
                util.rename(self.join("undo.dirstate"), self.join("dirstate"))
                try:
                    branch = self.opener("undo.branch").read()
                    self.dirstate.setbranch(branch)
                except IOError:
                    # undo.branch may be missing (presumably written by an
                    # older version) -- warn and keep the current branch
                    self.ui.warn(_("Named branch could not be reset, "
                                   "current branch still is: %s\n")
                                 % util.tolocal(self.dirstate.branch()))
                # history changed underneath us: drop all caches
                self.invalidate()
                self.dirstate.invalidate()
            else:
                self.ui.warn(_("no rollback information available\n"))
        finally:
            del lock, wlock
612
624
    def invalidate(self):
        """Discard all cached in-memory repository state.

        Deletes the lazily-created changelog and manifest attributes
        and resets the tag and branch caches (including the branch
        cache tip marker) so everything is reloaded from disk on next
        access.
        """
        for a in "changelog manifest".split():
            if hasattr(self, a):
                self.__delattr__(a)
        self.tagscache = None
        self._tagstypecache = None
        self.nodetagscache = None
        self.branchcache = None
        self._ubranchcache = None
        self._branchcachetip = None
622
635
    def _lock(self, lockname, wait, releasefn, acquirefn, desc):
        """Acquire the lock file at lockname.

        First tries a non-blocking acquire.  If the lock is held and
        wait is false, lock.LockHeld propagates; otherwise a warning
        naming the holder is printed and the acquire is retried with
        the configured ui.timeout.  releasefn is passed to the lock;
        acquirefn (if given) runs after acquisition.  desc is used in
        user-visible messages.
        """
        try:
            l = lock.lock(lockname, 0, releasefn, desc=desc)
        except lock.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
                          releasefn, desc=desc)
        if acquirefn:
            acquirefn()
        return l
637
650
638 def lock(self, wait=True):
651 def lock(self, wait=True):
639 if self._lockref and self._lockref():
652 if self._lockref and self._lockref():
640 return self._lockref()
653 return self._lockref()
641
654
642 l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
655 l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
643 _('repository %s') % self.origroot)
656 _('repository %s') % self.origroot)
644 self._lockref = weakref.ref(l)
657 self._lockref = weakref.ref(l)
645 return l
658 return l
646
659
647 def wlock(self, wait=True):
660 def wlock(self, wait=True):
648 if self._wlockref and self._wlockref():
661 if self._wlockref and self._wlockref():
649 return self._wlockref()
662 return self._wlockref()
650
663
651 l = self._lock(self.join("wlock"), wait, self.dirstate.write,
664 l = self._lock(self.join("wlock"), wait, self.dirstate.write,
652 self.dirstate.invalidate, _('working directory of %s') %
665 self.dirstate.invalidate, _('working directory of %s') %
653 self.origroot)
666 self.origroot)
654 self._wlockref = weakref.ref(l)
667 self._wlockref = weakref.ref(l)
655 return l
668 return l
656
669
    def filecommit(self, fn, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction

        fn is the file name; manifest1/manifest2 are the manifests of
        the two commit parents; linkrev is the changelog revision the
        new filelog entry will link to; tr is the active transaction.
        Returns the (possibly pre-existing) filelog node for fn, and
        appends fn to changelist unless the file is unmodified from
        its parent.
        """

        t = self.wread(fn)
        fl = self.file(fn)
        fp1 = manifest1.get(fn, nullid)
        fp2 = manifest2.get(fn, nullid)

        meta = {}
        cp = self.dirstate.copied(fn)
        if cp:
            # Mark the new revision of this file as a copy of another
            # file.  This copy data will effectively act as a parent
            # of this new revision.  If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent.  For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4        as the merge base
            #
            meta["copy"] = cp
            if not manifest2: # not a branch merge
                meta["copyrev"] = hex(manifest1.get(cp, nullid))
                fp2 = nullid
            elif fp2 != nullid: # copied on remote side
                meta["copyrev"] = hex(manifest1.get(cp, nullid))
            elif fp1 != nullid: # copied on local side, reversed
                meta["copyrev"] = hex(manifest2.get(cp))
                fp2 = fp1
            elif cp in manifest2: # directory rename on local side
                meta["copyrev"] = hex(manifest2[cp])
            else: # directory rename on remote side
                meta["copyrev"] = hex(manifest1.get(cp, nullid))
            self.ui.debug(_(" %s: copy %s:%s\n") %
                          (fn, cp, meta["copyrev"]))
            fp1 = nullid
        elif fp2 != nullid:
            # is one parent an ancestor of the other?
            fpa = fl.ancestor(fp1, fp2)
            if fpa == fp1:
                fp1, fp2 = fp2, nullid
            elif fpa == fp2:
                fp2 = nullid

        # is the file unmodified from the parent? report existing entry
        if fp2 == nullid and not fl.cmp(fp1, t) and not meta:
            return fp1

        changelist.append(fn)
        return fl.add(t, meta, tr, linkrev, fp1, fp2)
718
731
719 def rawcommit(self, files, text, user, date, p1=None, p2=None, extra={}):
732 def rawcommit(self, files, text, user, date, p1=None, p2=None, extra={}):
720 if p1 is None:
733 if p1 is None:
721 p1, p2 = self.dirstate.parents()
734 p1, p2 = self.dirstate.parents()
722 return self.commit(files=files, text=text, user=user, date=date,
735 return self.commit(files=files, text=text, user=user, date=date,
723 p1=p1, p2=p2, extra=extra, empty_ok=True)
736 p1=p1, p2=p2, extra=extra, empty_ok=True)
724
737
    def commit(self, files=None, text="", user=None, date=None,
               match=util.always, force=False, force_editor=False,
               p1=None, p2=None, extra={}, empty_ok=False):
        """Create a new changeset and return its node, or None when
        there is nothing to commit.

        When p1 is None ("dirstate mode") the parents and file lists
        come from the dirstate, which is updated afterwards; otherwise
        the caller supplies parents explicitly (rawcommit).  files
        restricts the commit to those files; without files, the status
        relative to match is used.  An editor is launched unless text
        is given (or empty_ok) and force_editor is off.
        """
        wlock = lock = tr = None
        valid = 0 # don't save the dirstate if this isn't set
        if files:
            files = util.unique(files)
        try:
            commit = []
            remove = []
            changed = []
            use_dirstate = (p1 is None) # not rawcommit
            extra = extra.copy()

            # build the lists of files to commit and to remove
            if use_dirstate:
                if files:
                    for f in files:
                        s = self.dirstate[f]
                        if s in 'nma':
                            commit.append(f)
                        elif s == 'r':
                            remove.append(f)
                        else:
                            self.ui.warn(_("%s not tracked!\n") % f)
                else:
                    changes = self.status(match=match)[:5]
                    modified, added, removed, deleted, unknown = changes
                    commit = modified + added
                    remove = removed
            else:
                commit = files

            if use_dirstate:
                p1, p2 = self.dirstate.parents()
                update_dirstate = True
            else:
                p1, p2 = p1, p2 or nullid
                # only move the dirstate forward if it was already at p1
                update_dirstate = (self.dirstate.parents()[0] == p1)

            c1 = self.changelog.read(p1)
            c2 = self.changelog.read(p2)
            m1 = self.manifest.read(c1[0]).copy()
            m2 = self.manifest.read(c2[0])

            if use_dirstate:
                branchname = self.workingctx().branch()
                try:
                    # branch names are stored in UTF-8; validate the encoding
                    branchname = branchname.decode('UTF-8').encode('UTF-8')
                except UnicodeDecodeError:
                    raise util.Abort(_('branch name not in UTF-8!'))
            else:
                branchname = ""

            if use_dirstate:
                oldname = c1[5].get("branch") # stored in UTF-8
                # a branch name change alone is enough to commit
                if (not commit and not remove and not force and p2 == nullid
                    and branchname == oldname):
                    self.ui.status(_("nothing changed\n"))
                    return None

            xp1 = hex(p1)
            if p2 == nullid: xp2 = ''
            else: xp2 = hex(p2)

            self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)

            wlock = self.wlock()
            lock = self.lock()
            tr = self.transaction()
            trp = weakref.proxy(tr)

            # check in files
            new = {}
            linkrev = self.changelog.count()
            commit.sort()
            is_exec = util.execfunc(self.root, m1.execf)
            is_link = util.linkfunc(self.root, m1.linkf)
            for f in commit:
                self.ui.note(f + "\n")
                try:
                    new[f] = self.filecommit(f, m1, m2, linkrev, trp, changed)
                    new_exec = is_exec(f)
                    new_link = is_link(f)
                    if ((not changed or changed[-1] != f) and
                        m2.get(f) != new[f]):
                        # mention the file in the changelog if some
                        # flag changed, even if there was no content
                        # change.
                        old_exec = m1.execf(f)
                        old_link = m1.linkf(f)
                        if old_exec != new_exec or old_link != new_link:
                            changed.append(f)
                    m1.set(f, new_exec, new_link)
                    if use_dirstate:
                        self.dirstate.normal(f)

                except (OSError, IOError):
                    if use_dirstate:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    else:
                        # rawcommit: treat an unreadable file as removed
                        remove.append(f)

            # update manifest
            m1.update(new)
            remove.sort()
            removed = []

            for f in remove:
                if f in m1:
                    del m1[f]
                    removed.append(f)
                elif f in m2:
                    removed.append(f)
            mn = self.manifest.add(m1, trp, linkrev, c1[0], c2[0],
                                   (new, removed))

            # add changeset
            new = new.keys()
            new.sort()

            user = user or self.ui.username()
            if (not empty_ok and not text) or force_editor:
                edittext = []
                if text:
                    edittext.append(text)
                edittext.append("")
                edittext.append(_("HG: Enter commit message."
                                  " Lines beginning with 'HG:' are removed."))
                edittext.append("HG: --")
                edittext.append("HG: user: %s" % user)
                if p2 != nullid:
                    edittext.append("HG: branch merge")
                if branchname:
                    edittext.append("HG: branch '%s'" % util.tolocal(branchname))
                edittext.extend(["HG: changed %s" % f for f in changed])
                edittext.extend(["HG: removed %s" % f for f in removed])
                if not changed and not remove:
                    edittext.append("HG: no files changed")
                edittext.append("")
                # run editor in the repository root
                olddir = os.getcwd()
                os.chdir(self.root)
                text = self.ui.edit("\n".join(edittext), user)
                os.chdir(olddir)

            if branchname:
                extra["branch"] = branchname

            if use_dirstate:
                # strip leading blank lines and trailing whitespace, and
                # reject an empty message
                lines = [line.rstrip() for line in text.rstrip().splitlines()]
                while lines and not lines[0]:
                    del lines[0]
                if not lines:
                    raise util.Abort(_("empty commit message"))
                text = '\n'.join(lines)

            n = self.changelog.add(mn, changed + removed, text, trp, p1, p2,
                                   user, date, extra)
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2)
            tr.close()

            # the tip moved: refresh the branch cache if one is loaded
            if self.branchcache:
                self.branchtags()

            if use_dirstate or update_dirstate:
                self.dirstate.setparents(n)
                if use_dirstate:
                    for f in removed:
                        self.dirstate.forget(f)
            valid = 1 # our dirstate updates are complete

            self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
            return n
        finally:
            if not valid: # don't save our updated dirstate
                self.dirstate.invalidate()
            del tr, lock, wlock
904
917
    def walk(self, node=None, files=[], match=util.always, badmatch=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function

        results are yielded in a tuple (src, filename), where src
        is one of:
        'f' the file was found in the directory tree
        'm' the file was only in the dirstate and not in the tree
        'b' file was not found and matched badmatch

        with a node, the changeset's manifest is walked; otherwise the
        walk is delegated to the dirstate.
        '''

        if node:
            fdict = dict.fromkeys(files)
            # for dirstate.walk, files=['.'] means "walk the whole tree".
            # follow that here, too
            fdict.pop('.', None)
            mdict = self.manifest.read(self.changelog.read(node)[0])
            mfiles = mdict.keys()
            mfiles.sort()
            for fn in mfiles:
                for ffn in fdict:
                    # match if the file is the exact name or a directory
                    if ffn == fn or fn.startswith("%s/" % ffn):
                        del fdict[ffn]
                        break
                if match(fn):
                    yield 'm', fn
            # names still in fdict were not found in the manifest at all
            ffiles = fdict.keys()
            ffiles.sort()
            for fn in ffiles:
                if badmatch and badmatch(fn):
                    if match(fn):
                        yield 'b', fn
                else:
                    self.ui.warn(_('%s: No such file in rev %s\n')
                                 % (self.pathto(fn), short(node)))
        else:
            for src, fn in self.dirstate.walk(files, match, badmatch=badmatch):
                yield src, fn
946
959
    def status(self, node1=None, node2=None, files=[], match=util.always,
               list_ignored=False, list_clean=False):
        """return status of files between two nodes or node and working directory

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.

        Returns a 7-tuple of sorted file lists:
        (modified, added, removed, deleted, unknown, ignored, clean).
        ignored and clean are only populated when list_ignored /
        list_clean are set.
        """

        def fcmp(fn, getnode):
            # compare working-directory contents against the stored revision
            t1 = self.wread(fn)
            return self.file(fn).cmp(getnode(fn), t1)

        def mfmatches(node):
            # manifest of node, restricted to files accepted by match
            change = self.changelog.read(node)
            mf = self.manifest.read(change[0]).copy()
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        modified, added, removed, deleted, unknown = [], [], [], [], []
        ignored, clean = [], []

        compareworking = False
        if not node1 or (not node2 and node1 == self.dirstate.parents()[0]):
            compareworking = True

        if not compareworking:
            # read the manifest from node1 before the manifest from node2,
            # so that we'll hit the manifest cache if we're going through
            # all the revisions in parent->child order.
            mf1 = mfmatches(node1)

        # are we comparing the working directory?
        if not node2:
            (lookup, modified, added, removed, deleted, unknown,
             ignored, clean) = self.dirstate.status(files, match,
                                                    list_ignored, list_clean)

            # are we comparing working dir against its parent?
            if compareworking:
                if lookup:
                    fixup = []
                    # do a full compare of any files that might have changed
                    ctx = self.changectx()
                    for f in lookup:
                        if f not in ctx or ctx[f].cmp(self.wread(f)):
                            modified.append(f)
                        else:
                            fixup.append(f)
                            if list_clean:
                                clean.append(f)

                    # update dirstate for files that are actually clean
                    if fixup:
                        wlock = None
                        try:
                            # best-effort: skip the fixup if the wlock
                            # cannot be taken without blocking
                            try:
                                wlock = self.wlock(False)
                            except lock.LockException:
                                pass
                            if wlock:
                                for f in fixup:
                                    self.dirstate.normal(f)
                        finally:
                            del wlock
            else:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                # XXX: create it in dirstate.py ?
                mf2 = mfmatches(self.dirstate.parents()[0])
                is_exec = util.execfunc(self.root, mf2.execf)
                is_link = util.linkfunc(self.root, mf2.linkf)
                for f in lookup + modified + added:
                    mf2[f] = ""
                    mf2.set(f, is_exec(f), is_link(f))
                for f in removed:
                    if f in mf2:
                        del mf2[f]

        else:
            # we are comparing two revisions
            mf2 = mfmatches(node2)

        if not compareworking:
            # flush lists from dirstate before comparing manifests
            modified, added, clean = [], [], []

            # make sure to sort the files so we talk to the disk in a
            # reasonable order
            mf2keys = mf2.keys()
            mf2keys.sort()
            getnode = lambda fn: mf1.get(fn, nullid)
            for fn in mf2keys:
                if fn in mf1:
                    if (mf1.flags(fn) != mf2.flags(fn) or
                        (mf1[fn] != mf2[fn] and
                         (mf2[fn] != "" or fcmp(fn, getnode)))):
                        modified.append(fn)
                    elif list_clean:
                        clean.append(fn)
                    del mf1[fn]
                else:
                    added.append(fn)

            # whatever is left in mf1 exists only on the node1 side
            removed = mf1.keys()

        # sort and return results:
        for l in modified, added, removed, deleted, unknown, ignored, clean:
            l.sort()
        return (modified, added, removed, deleted, unknown, ignored, clean)
1058
1071
1059 def add(self, list):
1072 def add(self, list):
1060 wlock = self.wlock()
1073 wlock = self.wlock()
1061 try:
1074 try:
1062 rejected = []
1075 rejected = []
1063 for f in list:
1076 for f in list:
1064 p = self.wjoin(f)
1077 p = self.wjoin(f)
1065 try:
1078 try:
1066 st = os.lstat(p)
1079 st = os.lstat(p)
1067 except:
1080 except:
1068 self.ui.warn(_("%s does not exist!\n") % f)
1081 self.ui.warn(_("%s does not exist!\n") % f)
1069 rejected.append(f)
1082 rejected.append(f)
1070 continue
1083 continue
1071 if st.st_size > 10000000:
1084 if st.st_size > 10000000:
1072 self.ui.warn(_("%s: files over 10MB may cause memory and"
1085 self.ui.warn(_("%s: files over 10MB may cause memory and"
1073 " performance problems\n"
1086 " performance problems\n"
1074 "(use 'hg revert %s' to unadd the file)\n")
1087 "(use 'hg revert %s' to unadd the file)\n")
1075 % (f, f))
1088 % (f, f))
1076 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1089 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1077 self.ui.warn(_("%s not added: only files and symlinks "
1090 self.ui.warn(_("%s not added: only files and symlinks "
1078 "supported currently\n") % f)
1091 "supported currently\n") % f)
1079 rejected.append(p)
1092 rejected.append(p)
1080 elif self.dirstate[f] in 'amn':
1093 elif self.dirstate[f] in 'amn':
1081 self.ui.warn(_("%s already tracked!\n") % f)
1094 self.ui.warn(_("%s already tracked!\n") % f)
1082 elif self.dirstate[f] == 'r':
1095 elif self.dirstate[f] == 'r':
1083 self.dirstate.normallookup(f)
1096 self.dirstate.normallookup(f)
1084 else:
1097 else:
1085 self.dirstate.add(f)
1098 self.dirstate.add(f)
1086 return rejected
1099 return rejected
1087 finally:
1100 finally:
1088 del wlock
1101 del wlock
1089
1102
def forget(self, list):
    """Unschedule the addition of the given files.

    Files that are not in the 'added' state are reported with a
    warning and left untouched.
    """
    wlock = self.wlock()
    try:
        for fn in list:
            if self.dirstate[fn] == 'a':
                self.dirstate.forget(fn)
            else:
                self.ui.warn(_("%s not added!\n") % fn)
    finally:
        del wlock
1100
1113
1101 def remove(self, list, unlink=False):
1114 def remove(self, list, unlink=False):
1102 wlock = None
1115 wlock = None
1103 try:
1116 try:
1104 if unlink:
1117 if unlink:
1105 for f in list:
1118 for f in list:
1106 try:
1119 try:
1107 util.unlink(self.wjoin(f))
1120 util.unlink(self.wjoin(f))
1108 except OSError, inst:
1121 except OSError, inst:
1109 if inst.errno != errno.ENOENT:
1122 if inst.errno != errno.ENOENT:
1110 raise
1123 raise
1111 wlock = self.wlock()
1124 wlock = self.wlock()
1112 for f in list:
1125 for f in list:
1113 if unlink and os.path.exists(self.wjoin(f)):
1126 if unlink and os.path.exists(self.wjoin(f)):
1114 self.ui.warn(_("%s still exists!\n") % f)
1127 self.ui.warn(_("%s still exists!\n") % f)
1115 elif self.dirstate[f] == 'a':
1128 elif self.dirstate[f] == 'a':
1116 self.dirstate.forget(f)
1129 self.dirstate.forget(f)
1117 elif f not in self.dirstate:
1130 elif f not in self.dirstate:
1118 self.ui.warn(_("%s not tracked!\n") % f)
1131 self.ui.warn(_("%s not tracked!\n") % f)
1119 else:
1132 else:
1120 self.dirstate.remove(f)
1133 self.dirstate.remove(f)
1121 finally:
1134 finally:
1122 del wlock
1135 del wlock
1123
1136
def undelete(self, list):
    """Restore removed files into the working directory.

    The file content comes from whichever dirstate-parent manifest
    contains the file; the dirstate entry is reset to 'normal'.
    Files not in the 'removed' state are warned about and skipped.
    """
    wlock = None
    try:
        parents = [p for p in self.dirstate.parents() if p != nullid]
        manifests = [self.manifest.read(self.changelog.read(p)[0])
                     for p in parents]
        wlock = self.wlock()
        for fn in list:
            if self.dirstate[fn] != 'r':
                self.ui.warn("%s not removed!\n" % fn)
                continue
            m = fn in manifests[0] and manifests[0] or manifests[1]
            data = self.file(fn).read(m[fn])
            self.wwrite(fn, data, m.flags(fn))
            self.dirstate.normal(fn)
    finally:
        del wlock
1140
1153
def copy(self, source, dest):
    """Record dest as a copy of source in the dirstate.

    dest must already exist in the working directory as a regular
    file or a symlink; otherwise a warning is issued and nothing is
    recorded.  dest is added to the dirstate if not yet tracked.
    """
    wlock = None
    try:
        target = self.wjoin(dest)
        if not (os.path.exists(target) or os.path.islink(target)):
            self.ui.warn(_("%s does not exist!\n") % dest)
        elif not (os.path.isfile(target) or os.path.islink(target)):
            self.ui.warn(_("copy failed: %s is not a file or a "
                           "symbolic link\n") % dest)
        else:
            wlock = self.wlock()
            if dest not in self.dirstate:
                self.dirstate.add(dest)
            self.dirstate.copy(source, dest)
    finally:
        del wlock
1157
1170
def heads(self, start=None):
    """Return the changelog heads, highest revision first."""
    # decorate-sort-undecorate on the negated revision number so
    # the result comes out in descending revision order
    decorated = [(-self.changelog.rev(h), h)
                 for h in self.changelog.heads(start)]
    decorated.sort()
    return [node for (negrev, node) in decorated]
1164
1177
def branchheads(self, branch, start=None):
    """Return the head nodes of the named branch, tip first.

    branch -- branch name to look up in branchtags()
    start  -- optional node; only heads reachable from it are kept

    Returns [] if the branch is unknown.
    """
    branches = self.branchtags()
    if branch not in branches:
        return []
    # The basic algorithm is this:
    #
    # Start from the branch tip since there are no later revisions that can
    # possibly be in this branch, and the tip is a guaranteed head.
    #
    # Remember the tip's parents as the first ancestors, since these by
    # definition are not heads.
    #
    # Step backwards from the branch tip through all the revisions. We are
    # guaranteed by the rules of Mercurial that we will now be visiting the
    # nodes in reverse topological order (children before parents).
    #
    # If a revision is one of the ancestors of a head then we can toss it
    # out of the ancestors set (we've already found it and won't be
    # visiting it again) and put its parents in the ancestors set.
    #
    # Otherwise, if a revision is in the branch it's another head, since it
    # wasn't in the ancestor list of an existing head. So add it to the
    # head list, and add its parents to the ancestor list.
    #
    # If it is not in the branch ignore it.
    #
    # Once we have a list of heads, use nodesbetween to filter out all the
    # heads that cannot be reached from startrev. There may be a more
    # efficient way to do this as part of the previous algorithm.

    set = util.set
    heads = [self.changelog.rev(branches[branch])]
    # Don't care if ancestors contains nullrev or not.
    ancestors = set(self.changelog.parentrevs(heads[0]))
    for rev in xrange(heads[0] - 1, nullrev, -1):
        if rev in ancestors:
            # known non-head: replace it by its parents in the set
            ancestors.update(self.changelog.parentrevs(rev))
            ancestors.remove(rev)
        elif self.changectx(rev).branch() == branch:
            # on the branch and not an ancestor of any known head:
            # it is a new head
            heads.append(rev)
            ancestors.update(self.changelog.parentrevs(rev))
    heads = [self.changelog.node(rev) for rev in heads]
    if start is not None:
        # keep only the heads that descend from start
        heads = self.changelog.nodesbetween([start], heads)[2]
    return heads
1210
1223
def branches(self, nodes):
    """For each node, follow first parents back to a branch point.

    A branch point is either a merge (second parent set) or a root
    (first parent null).  Returns a list of tuples
    (head, branchnode, p1, p2) where head is the node that was
    asked about.  Defaults to the changelog tip when nodes is empty.
    """
    if not nodes:
        nodes = [self.changelog.tip()]
    result = []
    for head in nodes:
        n = head
        while True:
            p1, p2 = self.changelog.parents(n)
            if p2 != nullid or p1 == nullid:
                result.append((head, n, p1, p2))
                break
            n = p1
    return result
1224
1237
def between(self, pairs):
    """Sample the linear path between each (top, bottom) pair.

    Walks first parents from top down towards bottom, collecting
    the nodes at exponentially growing distances (1, 2, 4, ...)
    from top.  Returns one list of sampled nodes per input pair.
    """
    result = []
    for top, bottom in pairs:
        sampled = []
        node, dist, nextdist = top, 0, 1
        while node != bottom:
            parent = self.changelog.parents(node)[0]
            if dist == nextdist:
                sampled.append(node)
                nextdist *= 2
            node = parent
            dist += 1
        result.append(sampled)
    return result
1243
1256
def findincoming(self, remote, base=None, heads=None, force=False):
    """Return list of roots of the subsets of missing nodes from remote

    If base dict is specified, assume that these nodes and their parents
    exist on the remote side and that no child of a node of base exists
    in both remote and self.
    Furthermore base will be updated to include the nodes that exists
    in self and remote but no children exists in self and remote.
    If a list of heads is specified, return only nodes which are heads
    or ancestors of these heads.

    All the ancestors of base are in self and in remote.
    All the descendants of the list returned are missing in self.
    (and so we know that the rest of the nodes are missing in remote, see
    outgoing)
    """
    m = self.changelog.nodemap
    search = []
    fetch = {}
    seen = {}
    seenbranch = {}
    if base == None:
        base = {}

    if not heads:
        heads = remote.heads()

    if self.changelog.tip() == nullid:
        # local repo is empty: everything the remote has is missing
        base[nullid] = 1
        if heads != [nullid]:
            return [nullid]
        return []

    # assume we're closer to the tip than the root
    # and start by examining the heads
    self.ui.status(_("searching for changes\n"))

    unknown = []
    for h in heads:
        if h not in m:
            unknown.append(h)
        else:
            base[h] = 1

    if not unknown:
        return []

    req = dict.fromkeys(unknown)
    reqcnt = 0

    # search through remote branches
    # a 'branch' here is a linear segment of history, with four parts:
    # head, root, first parent, second parent
    # (a branch always has two parents (or none) by definition)
    unknown = remote.branches(unknown)
    while unknown:
        r = []
        while unknown:
            n = unknown.pop(0)
            if n[0] in seen:
                continue

            self.ui.debug(_("examining %s:%s\n")
                          % (short(n[0]), short(n[1])))
            if n[0] == nullid: # found the end of the branch
                pass
            elif n in seenbranch:
                self.ui.debug(_("branch already found\n"))
                continue
            elif n[1] and n[1] in m: # do we know the base?
                self.ui.debug(_("found incomplete branch %s:%s\n")
                              % (short(n[0]), short(n[1])))
                search.append(n) # schedule branch range for scanning
                seenbranch[n] = 1
            else:
                if n[1] not in seen and n[1] not in fetch:
                    if n[2] in m and n[3] in m:
                        self.ui.debug(_("found new changeset %s\n") %
                                      short(n[1]))
                        fetch[n[1]] = 1 # earliest unknown
                    for p in n[2:4]:
                        if p in m:
                            base[p] = 1 # latest known

                # queue unknown parents for the next batched request
                for p in n[2:4]:
                    if p not in req and p not in m:
                        r.append(p)
                        req[p] = 1
            seen[n[0]] = 1

        if r:
            reqcnt += 1
            self.ui.debug(_("request %d: %s\n") %
                          (reqcnt, " ".join(map(short, r))))
            # ask about parents in batches of 10
            for p in xrange(0, len(r), 10):
                for b in remote.branches(r[p:p+10]):
                    self.ui.debug(_("received %s:%s\n") %
                                  (short(b[0]), short(b[1])))
                    unknown.append(b)

    # do binary search on the branches we found
    while search:
        n = search.pop(0)
        reqcnt += 1
        l = remote.between([(n[0], n[1])])[0]
        l.append(n[1])
        p = n[0]
        f = 1
        for i in l:
            self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
            if i in m:
                if f <= 2:
                    self.ui.debug(_("found new branch changeset %s\n") %
                                  short(p))
                    fetch[p] = 1
                    base[i] = 1
                else:
                    self.ui.debug(_("narrowed branch search to %s:%s\n")
                                  % (short(p), short(i)))
                    search.append((p, i))
                break
            p, f = i, f * 2

    # sanity check our fetch list
    for f in fetch.keys():
        if f in m:
            # fix: pass the whole binary node to short() -- slicing it
            # to 4 bytes first produced a truncated 8-digit hex id
            # instead of the standard 12-digit short id
            raise repo.RepoError(_("already have changeset ") + short(f))

    if base.keys() == [nullid]:
        if force:
            self.ui.warn(_("warning: repository is unrelated\n"))
        else:
            raise util.Abort(_("repository is unrelated"))

    self.ui.debug(_("found new changesets starting at ") +
                  " ".join([short(f) for f in fetch]) + "\n")

    self.ui.debug(_("%d total queries\n") % reqcnt)

    return fetch.keys()
1384
1397
def findoutgoing(self, remote, base=None, heads=None, force=False):
    """Return list of nodes that are roots of subsets not in remote

    If base dict is specified, assume that these nodes and their parents
    exist on the remote side.
    If a list of heads is specified, return only nodes which are heads
    or ancestors of these heads, and return a second element which
    contains all remote heads which get new children.
    """
    if base == None:
        base = {}
    # populate base with the common nodes if it was not supplied
    self.findincoming(remote, base, heads, force=force)

    self.ui.debug(_("common changesets up to ")
                  + " ".join(map(short, base.keys())) + "\n")

    # start with every local node, then prune what remote has
    remain = dict.fromkeys(self.changelog.nodemap)

    # prune everything remote has from the tree
    del remain[nullid]
    remove = base.keys()
    while remove:
        n = remove.pop(0)
        if n in remain:
            del remain[n]
            for p in self.changelog.parents(n):
                remove.append(p)

    # find every node whose parents have been pruned
    subset = []
    # find every remote head that will get new children
    updated_heads = {}
    for n in remain:
        p1, p2 = self.changelog.parents(n)
        if p1 not in remain and p2 not in remain:
            subset.append(n)
        if heads:
            if p1 in heads:
                updated_heads[p1] = True
            if p2 in heads:
                updated_heads[p2] = True

    # this is the set of all roots we have to push
    if heads:
        return subset, updated_heads.keys()
    else:
        return subset
1432
1445
def pull(self, remote, heads=None, force=False):
    """Pull changes from remote into this repository.

    heads limits the pull to ancestors of those nodes; force allows
    pulling from an unrelated repository.  Returns the result of
    addchangegroup, or 0 when there was nothing to pull.
    """
    lock = self.lock()
    try:
        fetch = self.findincoming(remote, heads=heads, force=force)
        if fetch == [nullid]:
            self.ui.status(_("requesting all changes\n"))
        if not fetch:
            self.ui.status(_("no changes found\n"))
            return 0

        if heads is not None:
            # partial pull needs server-side subset support
            if 'changegroupsubset' not in remote.capabilities:
                raise util.Abort(_("Partial pull cannot be done because other repository doesn't support changegroupsubset."))
            cg = remote.changegroupsubset(fetch, heads, 'pull')
        else:
            cg = remote.changegroup(fetch, 'pull')
        return self.addchangegroup(cg, 'pull', remote.url())
    finally:
        del lock
1453
1466
def push(self, remote, force=False, revs=None):
    """Push local changes to remote using the best available protocol.

    There are two ways to push to a remote repo:

    - addchangegroup assumes the local user can lock the remote
      repo (local filesystem, old ssh servers);
    - unbundle assumes the local user cannot lock the remote repo
      (new ssh servers, http servers).
    """
    if remote.capable('unbundle'):
        return self.push_unbundle(remote, force, revs)
    else:
        return self.push_addchangegroup(remote, force, revs)
1466
1479
def prepush(self, remote, force, revs):
    """Analyse a pending push and build the changegroup to send.

    remote -- the peer repository
    force  -- allow creating new remote heads / unrelated pushes
    revs   -- optional list of nodes limiting what is pushed

    Returns (cg, remote_heads) where cg is the changegroup to
    transmit, or (None, 1) when there is nothing to push or the
    push would create new remote heads and force is not set.
    """
    base = {}
    remote_heads = remote.heads()
    inc = self.findincoming(remote, base, remote_heads, force=force)

    update, updated_heads = self.findoutgoing(remote, base, remote_heads)
    if revs is not None:
        msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
    else:
        bases, heads = update, self.changelog.heads()

    if not bases:
        self.ui.status(_("no changes found\n"))
        return None, 1
    elif not force:
        # check if we're creating new remote heads
        # to be a remote head after push, node must be either
        # - unknown locally
        # - a local outgoing head descended from update
        # - a remote head that's known locally and not
        #   ancestral to an outgoing head

        warn = 0

        if remote_heads == [nullid]:
            # remote repo is empty: cannot create extra heads
            warn = 0
        elif not revs and len(heads) > len(remote_heads):
            warn = 1
        else:
            newheads = list(heads)
            for r in remote_heads:
                if r in self.changelog.nodemap:
                    desc = self.changelog.heads(r, heads)
                    l = [h for h in heads if h in desc]
                    if not l:
                        # known remote head with no outgoing
                        # descendant: it stays a head after push
                        newheads.append(r)
                else:
                    newheads.append(r)
            if len(newheads) > len(remote_heads):
                warn = 1

        if warn:
            self.ui.warn(_("abort: push creates new remote branches!\n"))
            self.ui.status(_("(did you forget to merge?"
                             " use push -f to force)\n"))
            return None, 1
    elif inc:
        self.ui.warn(_("note: unsynced remote changes!\n"))


    if revs is None:
        cg = self.changegroup(update, 'push')
    else:
        cg = self.changegroupsubset(update, revs, 'push')
    return cg, remote_heads
1522
1535
def push_addchangegroup(self, remote, force, revs):
    """Push by adding a changegroup directly to the locked remote.

    Returns the remote's addchangegroup result, or the error code
    from prepush when there is nothing to send.
    """
    lock = remote.lock()
    try:
        ret = self.prepush(remote, force, revs)
        if ret[0] is None:
            return ret[1]
        cg, remote_heads = ret
        return remote.addchangegroup(cg, 'push', self.url())
    finally:
        del lock
1533
1546
def push_unbundle(self, remote, force, revs):
    """Push via the remote's unbundle protocol.

    The local repo finds heads on the server and figures out which
    revs it must push.  Once the revs are transferred, the server
    aborts if it meanwhile grew different heads (someone else won a
    commit/push race).  With force set, the sentinel ['force'] is
    sent instead of the observed remote heads to skip that check.
    """
    ret = self.prepush(remote, force, revs)
    if ret[0] is None:
        return ret[1]
    cg, remote_heads = ret
    if force:
        remote_heads = ['force']
    return remote.unbundle(cg, remote_heads, 'push')
1546
1559
def changegroupinfo(self, nodes, source):
    """Report how many changesets are being bundled.

    Always reports for 'bundle' sources, otherwise only in verbose
    mode; in debug mode each changeset id is listed as well.
    """
    if source == 'bundle' or self.ui.verbose:
        self.ui.status(_("%d changesets found\n") % len(nodes))
        if self.ui.debugflag:
            self.ui.debug(_("List of changesets:\n"))
            for node in nodes:
                self.ui.debug("%s\n" % hex(node))
1554
1567
1555 def changegroupsubset(self, bases, heads, source, extranodes=None):
1568 def changegroupsubset(self, bases, heads, source, extranodes=None):
1556 """This function generates a changegroup consisting of all the nodes
1569 """This function generates a changegroup consisting of all the nodes
1557 that are descendents of any of the bases, and ancestors of any of
1570 that are descendents of any of the bases, and ancestors of any of
1558 the heads.
1571 the heads.
1559
1572
1560 It is fairly complex as determining which filenodes and which
1573 It is fairly complex as determining which filenodes and which
1561 manifest nodes need to be included for the changeset to be complete
1574 manifest nodes need to be included for the changeset to be complete
1562 is non-trivial.
1575 is non-trivial.
1563
1576
1564 Another wrinkle is doing the reverse, figuring out which changeset in
1577 Another wrinkle is doing the reverse, figuring out which changeset in
1565 the changegroup a particular filenode or manifestnode belongs to.
1578 the changegroup a particular filenode or manifestnode belongs to.
1566
1579
1567 The caller can specify some nodes that must be included in the
1580 The caller can specify some nodes that must be included in the
1568 changegroup using the extranodes argument. It should be a dict
1581 changegroup using the extranodes argument. It should be a dict
1569 where the keys are the filenames (or 1 for the manifest), and the
1582 where the keys are the filenames (or 1 for the manifest), and the
1570 values are lists of (node, linknode) tuples, where node is a wanted
1583 values are lists of (node, linknode) tuples, where node is a wanted
1571 node and linknode is the changelog node that should be transmitted as
1584 node and linknode is the changelog node that should be transmitted as
1572 the linkrev.
1585 the linkrev.
1573 """
1586 """
1574
1587
1575 self.hook('preoutgoing', throw=True, source=source)
1588 self.hook('preoutgoing', throw=True, source=source)
1576
1589
1577 # Set up some initial variables
1590 # Set up some initial variables
1578 # Make it easy to refer to self.changelog
1591 # Make it easy to refer to self.changelog
1579 cl = self.changelog
1592 cl = self.changelog
1580 # msng is short for missing - compute the list of changesets in this
1593 # msng is short for missing - compute the list of changesets in this
1581 # changegroup.
1594 # changegroup.
1582 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1595 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1583 self.changegroupinfo(msng_cl_lst, source)
1596 self.changegroupinfo(msng_cl_lst, source)
1584 # Some bases may turn out to be superfluous, and some heads may be
1597 # Some bases may turn out to be superfluous, and some heads may be
1585 # too. nodesbetween will return the minimal set of bases and heads
1598 # too. nodesbetween will return the minimal set of bases and heads
1586 # necessary to re-create the changegroup.
1599 # necessary to re-create the changegroup.
1587
1600
1588 # Known heads are the list of heads that it is assumed the recipient
1601 # Known heads are the list of heads that it is assumed the recipient
1589 # of this changegroup will know about.
1602 # of this changegroup will know about.
1590 knownheads = {}
1603 knownheads = {}
1591 # We assume that all parents of bases are known heads.
1604 # We assume that all parents of bases are known heads.
1592 for n in bases:
1605 for n in bases:
1593 for p in cl.parents(n):
1606 for p in cl.parents(n):
1594 if p != nullid:
1607 if p != nullid:
1595 knownheads[p] = 1
1608 knownheads[p] = 1
1596 knownheads = knownheads.keys()
1609 knownheads = knownheads.keys()
1597 if knownheads:
1610 if knownheads:
1598 # Now that we know what heads are known, we can compute which
1611 # Now that we know what heads are known, we can compute which
1599 # changesets are known. The recipient must know about all
1612 # changesets are known. The recipient must know about all
1600 # changesets required to reach the known heads from the null
1613 # changesets required to reach the known heads from the null
1601 # changeset.
1614 # changeset.
1602 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1615 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1603 junk = None
1616 junk = None
1604 # Transform the list into an ersatz set.
1617 # Transform the list into an ersatz set.
1605 has_cl_set = dict.fromkeys(has_cl_set)
1618 has_cl_set = dict.fromkeys(has_cl_set)
1606 else:
1619 else:
1607 # If there were no known heads, the recipient cannot be assumed to
1620 # If there were no known heads, the recipient cannot be assumed to
1608 # know about any changesets.
1621 # know about any changesets.
1609 has_cl_set = {}
1622 has_cl_set = {}
1610
1623
1611 # Make it easy to refer to self.manifest
1624 # Make it easy to refer to self.manifest
1612 mnfst = self.manifest
1625 mnfst = self.manifest
1613 # We don't know which manifests are missing yet
1626 # We don't know which manifests are missing yet
1614 msng_mnfst_set = {}
1627 msng_mnfst_set = {}
1615 # Nor do we know which filenodes are missing.
1628 # Nor do we know which filenodes are missing.
1616 msng_filenode_set = {}
1629 msng_filenode_set = {}
1617
1630
1618 junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
1631 junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
1619 junk = None
1632 junk = None
1620
1633
1621 # A changeset always belongs to itself, so the changenode lookup
1634 # A changeset always belongs to itself, so the changenode lookup
1622 # function for a changenode is identity.
1635 # function for a changenode is identity.
1623 def identity(x):
1636 def identity(x):
1624 return x
1637 return x
1625
1638
1626 # A function generating function. Sets up an environment for the
1639 # A function generating function. Sets up an environment for the
1627 # inner function.
1640 # inner function.
1628 def cmp_by_rev_func(revlog):
1641 def cmp_by_rev_func(revlog):
1629 # Compare two nodes by their revision number in the environment's
1642 # Compare two nodes by their revision number in the environment's
1630 # revision history. Since the revision number both represents the
1643 # revision history. Since the revision number both represents the
1631 # most efficient order to read the nodes in, and represents a
1644 # most efficient order to read the nodes in, and represents a
1632 # topological sorting of the nodes, this function is often useful.
1645 # topological sorting of the nodes, this function is often useful.
1633 def cmp_by_rev(a, b):
1646 def cmp_by_rev(a, b):
1634 return cmp(revlog.rev(a), revlog.rev(b))
1647 return cmp(revlog.rev(a), revlog.rev(b))
1635 return cmp_by_rev
1648 return cmp_by_rev
1636
1649
1637 # If we determine that a particular file or manifest node must be a
1650 # If we determine that a particular file or manifest node must be a
1638 # node that the recipient of the changegroup will already have, we can
1651 # node that the recipient of the changegroup will already have, we can
1639 # also assume the recipient will have all the parents. This function
1652 # also assume the recipient will have all the parents. This function
1640 # prunes them from the set of missing nodes.
1653 # prunes them from the set of missing nodes.
1641 def prune_parents(revlog, hasset, msngset):
1654 def prune_parents(revlog, hasset, msngset):
1642 haslst = hasset.keys()
1655 haslst = hasset.keys()
1643 haslst.sort(cmp_by_rev_func(revlog))
1656 haslst.sort(cmp_by_rev_func(revlog))
1644 for node in haslst:
1657 for node in haslst:
1645 parentlst = [p for p in revlog.parents(node) if p != nullid]
1658 parentlst = [p for p in revlog.parents(node) if p != nullid]
1646 while parentlst:
1659 while parentlst:
1647 n = parentlst.pop()
1660 n = parentlst.pop()
1648 if n not in hasset:
1661 if n not in hasset:
1649 hasset[n] = 1
1662 hasset[n] = 1
1650 p = [p for p in revlog.parents(n) if p != nullid]
1663 p = [p for p in revlog.parents(n) if p != nullid]
1651 parentlst.extend(p)
1664 parentlst.extend(p)
1652 for n in hasset:
1665 for n in hasset:
1653 msngset.pop(n, None)
1666 msngset.pop(n, None)
1654
1667
1655 # This is a function generating function used to set up an environment
1668 # This is a function generating function used to set up an environment
1656 # for the inner function to execute in.
1669 # for the inner function to execute in.
1657 def manifest_and_file_collector(changedfileset):
1670 def manifest_and_file_collector(changedfileset):
1658 # This is an information gathering function that gathers
1671 # This is an information gathering function that gathers
1659 # information from each changeset node that goes out as part of
1672 # information from each changeset node that goes out as part of
1660 # the changegroup. The information gathered is a list of which
1673 # the changegroup. The information gathered is a list of which
1661 # manifest nodes are potentially required (the recipient may
1674 # manifest nodes are potentially required (the recipient may
1662 # already have them) and total list of all files which were
1675 # already have them) and total list of all files which were
1663 # changed in any changeset in the changegroup.
1676 # changed in any changeset in the changegroup.
1664 #
1677 #
1665 # We also remember the first changenode we saw any manifest
1678 # We also remember the first changenode we saw any manifest
1666 # referenced by so we can later determine which changenode 'owns'
1679 # referenced by so we can later determine which changenode 'owns'
1667 # the manifest.
1680 # the manifest.
1668 def collect_manifests_and_files(clnode):
1681 def collect_manifests_and_files(clnode):
1669 c = cl.read(clnode)
1682 c = cl.read(clnode)
1670 for f in c[3]:
1683 for f in c[3]:
1671 # This is to make sure we only have one instance of each
1684 # This is to make sure we only have one instance of each
1672 # filename string for each filename.
1685 # filename string for each filename.
1673 changedfileset.setdefault(f, f)
1686 changedfileset.setdefault(f, f)
1674 msng_mnfst_set.setdefault(c[0], clnode)
1687 msng_mnfst_set.setdefault(c[0], clnode)
1675 return collect_manifests_and_files
1688 return collect_manifests_and_files
1676
1689
1677 # Figure out which manifest nodes (of the ones we think might be part
1690 # Figure out which manifest nodes (of the ones we think might be part
1678 # of the changegroup) the recipient must know about and remove them
1691 # of the changegroup) the recipient must know about and remove them
1679 # from the changegroup.
1692 # from the changegroup.
1680 def prune_manifests():
1693 def prune_manifests():
1681 has_mnfst_set = {}
1694 has_mnfst_set = {}
1682 for n in msng_mnfst_set:
1695 for n in msng_mnfst_set:
1683 # If a 'missing' manifest thinks it belongs to a changenode
1696 # If a 'missing' manifest thinks it belongs to a changenode
1684 # the recipient is assumed to have, obviously the recipient
1697 # the recipient is assumed to have, obviously the recipient
1685 # must have that manifest.
1698 # must have that manifest.
1686 linknode = cl.node(mnfst.linkrev(n))
1699 linknode = cl.node(mnfst.linkrev(n))
1687 if linknode in has_cl_set:
1700 if linknode in has_cl_set:
1688 has_mnfst_set[n] = 1
1701 has_mnfst_set[n] = 1
1689 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1702 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1690
1703
1691 # Use the information collected in collect_manifests_and_files to say
1704 # Use the information collected in collect_manifests_and_files to say
1692 # which changenode any manifestnode belongs to.
1705 # which changenode any manifestnode belongs to.
1693 def lookup_manifest_link(mnfstnode):
1706 def lookup_manifest_link(mnfstnode):
1694 return msng_mnfst_set[mnfstnode]
1707 return msng_mnfst_set[mnfstnode]
1695
1708
1696 # A function generating function that sets up the initial environment
1709 # A function generating function that sets up the initial environment
1697 # the inner function.
1710 # the inner function.
1698 def filenode_collector(changedfiles):
1711 def filenode_collector(changedfiles):
1699 next_rev = [0]
1712 next_rev = [0]
1700 # This gathers information from each manifestnode included in the
1713 # This gathers information from each manifestnode included in the
1701 # changegroup about which filenodes the manifest node references
1714 # changegroup about which filenodes the manifest node references
1702 # so we can include those in the changegroup too.
1715 # so we can include those in the changegroup too.
1703 #
1716 #
1704 # It also remembers which changenode each filenode belongs to. It
1717 # It also remembers which changenode each filenode belongs to. It
1705 # does this by assuming the a filenode belongs to the changenode
1718 # does this by assuming the a filenode belongs to the changenode
1706 # the first manifest that references it belongs to.
1719 # the first manifest that references it belongs to.
1707 def collect_msng_filenodes(mnfstnode):
1720 def collect_msng_filenodes(mnfstnode):
1708 r = mnfst.rev(mnfstnode)
1721 r = mnfst.rev(mnfstnode)
1709 if r == next_rev[0]:
1722 if r == next_rev[0]:
1710 # If the last rev we looked at was the one just previous,
1723 # If the last rev we looked at was the one just previous,
1711 # we only need to see a diff.
1724 # we only need to see a diff.
1712 deltamf = mnfst.readdelta(mnfstnode)
1725 deltamf = mnfst.readdelta(mnfstnode)
1713 # For each line in the delta
1726 # For each line in the delta
1714 for f, fnode in deltamf.items():
1727 for f, fnode in deltamf.items():
1715 f = changedfiles.get(f, None)
1728 f = changedfiles.get(f, None)
1716 # And if the file is in the list of files we care
1729 # And if the file is in the list of files we care
1717 # about.
1730 # about.
1718 if f is not None:
1731 if f is not None:
1719 # Get the changenode this manifest belongs to
1732 # Get the changenode this manifest belongs to
1720 clnode = msng_mnfst_set[mnfstnode]
1733 clnode = msng_mnfst_set[mnfstnode]
1721 # Create the set of filenodes for the file if
1734 # Create the set of filenodes for the file if
1722 # there isn't one already.
1735 # there isn't one already.
1723 ndset = msng_filenode_set.setdefault(f, {})
1736 ndset = msng_filenode_set.setdefault(f, {})
1724 # And set the filenode's changelog node to the
1737 # And set the filenode's changelog node to the
1725 # manifest's if it hasn't been set already.
1738 # manifest's if it hasn't been set already.
1726 ndset.setdefault(fnode, clnode)
1739 ndset.setdefault(fnode, clnode)
1727 else:
1740 else:
1728 # Otherwise we need a full manifest.
1741 # Otherwise we need a full manifest.
1729 m = mnfst.read(mnfstnode)
1742 m = mnfst.read(mnfstnode)
1730 # For every file in we care about.
1743 # For every file in we care about.
1731 for f in changedfiles:
1744 for f in changedfiles:
1732 fnode = m.get(f, None)
1745 fnode = m.get(f, None)
1733 # If it's in the manifest
1746 # If it's in the manifest
1734 if fnode is not None:
1747 if fnode is not None:
1735 # See comments above.
1748 # See comments above.
1736 clnode = msng_mnfst_set[mnfstnode]
1749 clnode = msng_mnfst_set[mnfstnode]
1737 ndset = msng_filenode_set.setdefault(f, {})
1750 ndset = msng_filenode_set.setdefault(f, {})
1738 ndset.setdefault(fnode, clnode)
1751 ndset.setdefault(fnode, clnode)
1739 # Remember the revision we hope to see next.
1752 # Remember the revision we hope to see next.
1740 next_rev[0] = r + 1
1753 next_rev[0] = r + 1
1741 return collect_msng_filenodes
1754 return collect_msng_filenodes
1742
1755
1743 # We have a list of filenodes we think we need for a file, lets remove
1756 # We have a list of filenodes we think we need for a file, lets remove
1744 # all those we now the recipient must have.
1757 # all those we now the recipient must have.
1745 def prune_filenodes(f, filerevlog):
1758 def prune_filenodes(f, filerevlog):
1746 msngset = msng_filenode_set[f]
1759 msngset = msng_filenode_set[f]
1747 hasset = {}
1760 hasset = {}
1748 # If a 'missing' filenode thinks it belongs to a changenode we
1761 # If a 'missing' filenode thinks it belongs to a changenode we
1749 # assume the recipient must have, then the recipient must have
1762 # assume the recipient must have, then the recipient must have
1750 # that filenode.
1763 # that filenode.
1751 for n in msngset:
1764 for n in msngset:
1752 clnode = cl.node(filerevlog.linkrev(n))
1765 clnode = cl.node(filerevlog.linkrev(n))
1753 if clnode in has_cl_set:
1766 if clnode in has_cl_set:
1754 hasset[n] = 1
1767 hasset[n] = 1
1755 prune_parents(filerevlog, hasset, msngset)
1768 prune_parents(filerevlog, hasset, msngset)
1756
1769
1757 # A function generator function that sets up the a context for the
1770 # A function generator function that sets up the a context for the
1758 # inner function.
1771 # inner function.
1759 def lookup_filenode_link_func(fname):
1772 def lookup_filenode_link_func(fname):
1760 msngset = msng_filenode_set[fname]
1773 msngset = msng_filenode_set[fname]
1761 # Lookup the changenode the filenode belongs to.
1774 # Lookup the changenode the filenode belongs to.
1762 def lookup_filenode_link(fnode):
1775 def lookup_filenode_link(fnode):
1763 return msngset[fnode]
1776 return msngset[fnode]
1764 return lookup_filenode_link
1777 return lookup_filenode_link
1765
1778
1766 # Add the nodes that were explicitly requested.
1779 # Add the nodes that were explicitly requested.
1767 def add_extra_nodes(name, nodes):
1780 def add_extra_nodes(name, nodes):
1768 if not extranodes or name not in extranodes:
1781 if not extranodes or name not in extranodes:
1769 return
1782 return
1770
1783
1771 for node, linknode in extranodes[name]:
1784 for node, linknode in extranodes[name]:
1772 if node not in nodes:
1785 if node not in nodes:
1773 nodes[node] = linknode
1786 nodes[node] = linknode
1774
1787
1775 # Now that we have all theses utility functions to help out and
1788 # Now that we have all theses utility functions to help out and
1776 # logically divide up the task, generate the group.
1789 # logically divide up the task, generate the group.
1777 def gengroup():
1790 def gengroup():
1778 # The set of changed files starts empty.
1791 # The set of changed files starts empty.
1779 changedfiles = {}
1792 changedfiles = {}
1780 # Create a changenode group generator that will call our functions
1793 # Create a changenode group generator that will call our functions
1781 # back to lookup the owning changenode and collect information.
1794 # back to lookup the owning changenode and collect information.
1782 group = cl.group(msng_cl_lst, identity,
1795 group = cl.group(msng_cl_lst, identity,
1783 manifest_and_file_collector(changedfiles))
1796 manifest_and_file_collector(changedfiles))
1784 for chnk in group:
1797 for chnk in group:
1785 yield chnk
1798 yield chnk
1786
1799
1787 # The list of manifests has been collected by the generator
1800 # The list of manifests has been collected by the generator
1788 # calling our functions back.
1801 # calling our functions back.
1789 prune_manifests()
1802 prune_manifests()
1790 add_extra_nodes(1, msng_mnfst_set)
1803 add_extra_nodes(1, msng_mnfst_set)
1791 msng_mnfst_lst = msng_mnfst_set.keys()
1804 msng_mnfst_lst = msng_mnfst_set.keys()
1792 # Sort the manifestnodes by revision number.
1805 # Sort the manifestnodes by revision number.
1793 msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
1806 msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
1794 # Create a generator for the manifestnodes that calls our lookup
1807 # Create a generator for the manifestnodes that calls our lookup
1795 # and data collection functions back.
1808 # and data collection functions back.
1796 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1809 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1797 filenode_collector(changedfiles))
1810 filenode_collector(changedfiles))
1798 for chnk in group:
1811 for chnk in group:
1799 yield chnk
1812 yield chnk
1800
1813
1801 # These are no longer needed, dereference and toss the memory for
1814 # These are no longer needed, dereference and toss the memory for
1802 # them.
1815 # them.
1803 msng_mnfst_lst = None
1816 msng_mnfst_lst = None
1804 msng_mnfst_set.clear()
1817 msng_mnfst_set.clear()
1805
1818
1806 if extranodes:
1819 if extranodes:
1807 for fname in extranodes:
1820 for fname in extranodes:
1808 if isinstance(fname, int):
1821 if isinstance(fname, int):
1809 continue
1822 continue
1810 add_extra_nodes(fname,
1823 add_extra_nodes(fname,
1811 msng_filenode_set.setdefault(fname, {}))
1824 msng_filenode_set.setdefault(fname, {}))
1812 changedfiles[fname] = 1
1825 changedfiles[fname] = 1
1813 changedfiles = changedfiles.keys()
1826 changedfiles = changedfiles.keys()
1814 changedfiles.sort()
1827 changedfiles.sort()
1815 # Go through all our files in order sorted by name.
1828 # Go through all our files in order sorted by name.
1816 for fname in changedfiles:
1829 for fname in changedfiles:
1817 filerevlog = self.file(fname)
1830 filerevlog = self.file(fname)
1818 if filerevlog.count() == 0:
1831 if filerevlog.count() == 0:
1819 raise util.Abort(_("empty or missing revlog for %s") % fname)
1832 raise util.Abort(_("empty or missing revlog for %s") % fname)
1820 # Toss out the filenodes that the recipient isn't really
1833 # Toss out the filenodes that the recipient isn't really
1821 # missing.
1834 # missing.
1822 if fname in msng_filenode_set:
1835 if fname in msng_filenode_set:
1823 prune_filenodes(fname, filerevlog)
1836 prune_filenodes(fname, filerevlog)
1824 msng_filenode_lst = msng_filenode_set[fname].keys()
1837 msng_filenode_lst = msng_filenode_set[fname].keys()
1825 else:
1838 else:
1826 msng_filenode_lst = []
1839 msng_filenode_lst = []
1827 # If any filenodes are left, generate the group for them,
1840 # If any filenodes are left, generate the group for them,
1828 # otherwise don't bother.
1841 # otherwise don't bother.
1829 if len(msng_filenode_lst) > 0:
1842 if len(msng_filenode_lst) > 0:
1830 yield changegroup.chunkheader(len(fname))
1843 yield changegroup.chunkheader(len(fname))
1831 yield fname
1844 yield fname
1832 # Sort the filenodes by their revision #
1845 # Sort the filenodes by their revision #
1833 msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
1846 msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
1834 # Create a group generator and only pass in a changenode
1847 # Create a group generator and only pass in a changenode
1835 # lookup function as we need to collect no information
1848 # lookup function as we need to collect no information
1836 # from filenodes.
1849 # from filenodes.
1837 group = filerevlog.group(msng_filenode_lst,
1850 group = filerevlog.group(msng_filenode_lst,
1838 lookup_filenode_link_func(fname))
1851 lookup_filenode_link_func(fname))
1839 for chnk in group:
1852 for chnk in group:
1840 yield chnk
1853 yield chnk
1841 if fname in msng_filenode_set:
1854 if fname in msng_filenode_set:
1842 # Don't need this anymore, toss it to free memory.
1855 # Don't need this anymore, toss it to free memory.
1843 del msng_filenode_set[fname]
1856 del msng_filenode_set[fname]
1844 # Signal that no more groups are left.
1857 # Signal that no more groups are left.
1845 yield changegroup.closechunk()
1858 yield changegroup.closechunk()
1846
1859
1847 if msng_cl_lst:
1860 if msng_cl_lst:
1848 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1861 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1849
1862
1850 return util.chunkbuffer(gengroup())
1863 return util.chunkbuffer(gengroup())
1851
1864
1852 def changegroup(self, basenodes, source):
1865 def changegroup(self, basenodes, source):
1853 """Generate a changegroup of all nodes that we have that a recipient
1866 """Generate a changegroup of all nodes that we have that a recipient
1854 doesn't.
1867 doesn't.
1855
1868
1856 This is much easier than the previous function as we can assume that
1869 This is much easier than the previous function as we can assume that
1857 the recipient has any changenode we aren't sending them."""
1870 the recipient has any changenode we aren't sending them."""
1858
1871
1859 self.hook('preoutgoing', throw=True, source=source)
1872 self.hook('preoutgoing', throw=True, source=source)
1860
1873
1861 cl = self.changelog
1874 cl = self.changelog
1862 nodes = cl.nodesbetween(basenodes, None)[0]
1875 nodes = cl.nodesbetween(basenodes, None)[0]
1863 revset = dict.fromkeys([cl.rev(n) for n in nodes])
1876 revset = dict.fromkeys([cl.rev(n) for n in nodes])
1864 self.changegroupinfo(nodes, source)
1877 self.changegroupinfo(nodes, source)
1865
1878
1866 def identity(x):
1879 def identity(x):
1867 return x
1880 return x
1868
1881
1869 def gennodelst(revlog):
1882 def gennodelst(revlog):
1870 for r in xrange(0, revlog.count()):
1883 for r in xrange(0, revlog.count()):
1871 n = revlog.node(r)
1884 n = revlog.node(r)
1872 if revlog.linkrev(n) in revset:
1885 if revlog.linkrev(n) in revset:
1873 yield n
1886 yield n
1874
1887
1875 def changed_file_collector(changedfileset):
1888 def changed_file_collector(changedfileset):
1876 def collect_changed_files(clnode):
1889 def collect_changed_files(clnode):
1877 c = cl.read(clnode)
1890 c = cl.read(clnode)
1878 for fname in c[3]:
1891 for fname in c[3]:
1879 changedfileset[fname] = 1
1892 changedfileset[fname] = 1
1880 return collect_changed_files
1893 return collect_changed_files
1881
1894
1882 def lookuprevlink_func(revlog):
1895 def lookuprevlink_func(revlog):
1883 def lookuprevlink(n):
1896 def lookuprevlink(n):
1884 return cl.node(revlog.linkrev(n))
1897 return cl.node(revlog.linkrev(n))
1885 return lookuprevlink
1898 return lookuprevlink
1886
1899
1887 def gengroup():
1900 def gengroup():
1888 # construct a list of all changed files
1901 # construct a list of all changed files
1889 changedfiles = {}
1902 changedfiles = {}
1890
1903
1891 for chnk in cl.group(nodes, identity,
1904 for chnk in cl.group(nodes, identity,
1892 changed_file_collector(changedfiles)):
1905 changed_file_collector(changedfiles)):
1893 yield chnk
1906 yield chnk
1894 changedfiles = changedfiles.keys()
1907 changedfiles = changedfiles.keys()
1895 changedfiles.sort()
1908 changedfiles.sort()
1896
1909
1897 mnfst = self.manifest
1910 mnfst = self.manifest
1898 nodeiter = gennodelst(mnfst)
1911 nodeiter = gennodelst(mnfst)
1899 for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
1912 for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
1900 yield chnk
1913 yield chnk
1901
1914
1902 for fname in changedfiles:
1915 for fname in changedfiles:
1903 filerevlog = self.file(fname)
1916 filerevlog = self.file(fname)
1904 if filerevlog.count() == 0:
1917 if filerevlog.count() == 0:
1905 raise util.Abort(_("empty or missing revlog for %s") % fname)
1918 raise util.Abort(_("empty or missing revlog for %s") % fname)
1906 nodeiter = gennodelst(filerevlog)
1919 nodeiter = gennodelst(filerevlog)
1907 nodeiter = list(nodeiter)
1920 nodeiter = list(nodeiter)
1908 if nodeiter:
1921 if nodeiter:
1909 yield changegroup.chunkheader(len(fname))
1922 yield changegroup.chunkheader(len(fname))
1910 yield fname
1923 yield fname
1911 lookup = lookuprevlink_func(filerevlog)
1924 lookup = lookuprevlink_func(filerevlog)
1912 for chnk in filerevlog.group(nodeiter, lookup):
1925 for chnk in filerevlog.group(nodeiter, lookup):
1913 yield chnk
1926 yield chnk
1914
1927
1915 yield changegroup.closechunk()
1928 yield changegroup.closechunk()
1916
1929
1917 if nodes:
1930 if nodes:
1918 self.hook('outgoing', node=hex(nodes[0]), source=source)
1931 self.hook('outgoing', node=hex(nodes[0]), source=source)
1919
1932
1920 return util.chunkbuffer(gengroup())
1933 return util.chunkbuffer(gengroup())
1921
1934
1922 def addchangegroup(self, source, srctype, url, emptyok=False):
1935 def addchangegroup(self, source, srctype, url, emptyok=False):
1923 """add changegroup to repo.
1936 """add changegroup to repo.
1924
1937
1925 return values:
1938 return values:
1926 - nothing changed or no source: 0
1939 - nothing changed or no source: 0
1927 - more heads than before: 1+added heads (2..n)
1940 - more heads than before: 1+added heads (2..n)
1928 - less heads than before: -1-removed heads (-2..-n)
1941 - less heads than before: -1-removed heads (-2..-n)
1929 - number of heads stays the same: 1
1942 - number of heads stays the same: 1
1930 """
1943 """
1931 def csmap(x):
1944 def csmap(x):
1932 self.ui.debug(_("add changeset %s\n") % short(x))
1945 self.ui.debug(_("add changeset %s\n") % short(x))
1933 return cl.count()
1946 return cl.count()
1934
1947
1935 def revmap(x):
1948 def revmap(x):
1936 return cl.rev(x)
1949 return cl.rev(x)
1937
1950
1938 if not source:
1951 if not source:
1939 return 0
1952 return 0
1940
1953
1941 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1954 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1942
1955
1943 changesets = files = revisions = 0
1956 changesets = files = revisions = 0
1944
1957
1945 # write changelog data to temp files so concurrent readers will not see
1958 # write changelog data to temp files so concurrent readers will not see
1946 # inconsistent view
1959 # inconsistent view
1947 cl = self.changelog
1960 cl = self.changelog
1948 cl.delayupdate()
1961 cl.delayupdate()
1949 oldheads = len(cl.heads())
1962 oldheads = len(cl.heads())
1950
1963
1951 tr = self.transaction()
1964 tr = self.transaction()
1952 try:
1965 try:
1953 trp = weakref.proxy(tr)
1966 trp = weakref.proxy(tr)
1954 # pull off the changeset group
1967 # pull off the changeset group
1955 self.ui.status(_("adding changesets\n"))
1968 self.ui.status(_("adding changesets\n"))
1956 cor = cl.count() - 1
1969 cor = cl.count() - 1
1957 chunkiter = changegroup.chunkiter(source)
1970 chunkiter = changegroup.chunkiter(source)
1958 if cl.addgroup(chunkiter, csmap, trp, 1) is None and not emptyok:
1971 if cl.addgroup(chunkiter, csmap, trp, 1) is None and not emptyok:
1959 raise util.Abort(_("received changelog group is empty"))
1972 raise util.Abort(_("received changelog group is empty"))
1960 cnr = cl.count() - 1
1973 cnr = cl.count() - 1
1961 changesets = cnr - cor
1974 changesets = cnr - cor
1962
1975
1963 # pull off the manifest group
1976 # pull off the manifest group
1964 self.ui.status(_("adding manifests\n"))
1977 self.ui.status(_("adding manifests\n"))
1965 chunkiter = changegroup.chunkiter(source)
1978 chunkiter = changegroup.chunkiter(source)
1966 # no need to check for empty manifest group here:
1979 # no need to check for empty manifest group here:
1967 # if the result of the merge of 1 and 2 is the same in 3 and 4,
1980 # if the result of the merge of 1 and 2 is the same in 3 and 4,
1968 # no new manifest will be created and the manifest group will
1981 # no new manifest will be created and the manifest group will
1969 # be empty during the pull
1982 # be empty during the pull
1970 self.manifest.addgroup(chunkiter, revmap, trp)
1983 self.manifest.addgroup(chunkiter, revmap, trp)
1971
1984
1972 # process the files
1985 # process the files
1973 self.ui.status(_("adding file changes\n"))
1986 self.ui.status(_("adding file changes\n"))
1974 while 1:
1987 while 1:
1975 f = changegroup.getchunk(source)
1988 f = changegroup.getchunk(source)
1976 if not f:
1989 if not f:
1977 break
1990 break
1978 self.ui.debug(_("adding %s revisions\n") % f)
1991 self.ui.debug(_("adding %s revisions\n") % f)
1979 fl = self.file(f)
1992 fl = self.file(f)
1980 o = fl.count()
1993 o = fl.count()
1981 chunkiter = changegroup.chunkiter(source)
1994 chunkiter = changegroup.chunkiter(source)
1982 if fl.addgroup(chunkiter, revmap, trp) is None:
1995 if fl.addgroup(chunkiter, revmap, trp) is None:
1983 raise util.Abort(_("received file revlog group is empty"))
1996 raise util.Abort(_("received file revlog group is empty"))
1984 revisions += fl.count() - o
1997 revisions += fl.count() - o
1985 files += 1
1998 files += 1
1986
1999
1987 # make changelog see real files again
2000 # make changelog see real files again
1988 cl.finalize(trp)
2001 cl.finalize(trp)
1989
2002
1990 newheads = len(self.changelog.heads())
2003 newheads = len(self.changelog.heads())
1991 heads = ""
2004 heads = ""
1992 if oldheads and newheads != oldheads:
2005 if oldheads and newheads != oldheads:
1993 heads = _(" (%+d heads)") % (newheads - oldheads)
2006 heads = _(" (%+d heads)") % (newheads - oldheads)
1994
2007
1995 self.ui.status(_("added %d changesets"
2008 self.ui.status(_("added %d changesets"
1996 " with %d changes to %d files%s\n")
2009 " with %d changes to %d files%s\n")
1997 % (changesets, revisions, files, heads))
2010 % (changesets, revisions, files, heads))
1998
2011
1999 if changesets > 0:
2012 if changesets > 0:
2000 self.hook('pretxnchangegroup', throw=True,
2013 self.hook('pretxnchangegroup', throw=True,
2001 node=hex(self.changelog.node(cor+1)), source=srctype,
2014 node=hex(self.changelog.node(cor+1)), source=srctype,
2002 url=url)
2015 url=url)
2003
2016
2004 tr.close()
2017 tr.close()
2005 finally:
2018 finally:
2006 del tr
2019 del tr
2007
2020
2008 if changesets > 0:
2021 if changesets > 0:
2009 # forcefully update the on-disk branch cache
2022 # forcefully update the on-disk branch cache
2010 self.ui.debug(_("updating the branch cache\n"))
2023 self.ui.debug(_("updating the branch cache\n"))
2011 self.branchcache = None
2012 self.branchtags()
2024 self.branchtags()
2013 self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
2025 self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
2014 source=srctype, url=url)
2026 source=srctype, url=url)
2015
2027
2016 for i in xrange(cor + 1, cnr + 1):
2028 for i in xrange(cor + 1, cnr + 1):
2017 self.hook("incoming", node=hex(self.changelog.node(i)),
2029 self.hook("incoming", node=hex(self.changelog.node(i)),
2018 source=srctype, url=url)
2030 source=srctype, url=url)
2019
2031
2020 # never return 0 here:
2032 # never return 0 here:
2021 if newheads < oldheads:
2033 if newheads < oldheads:
2022 return newheads - oldheads - 1
2034 return newheads - oldheads - 1
2023 else:
2035 else:
2024 return newheads - oldheads + 1
2036 return newheads - oldheads + 1
2025
2037
2026
2038
def stream_in(self, remote):
    '''Copy all repository data from remote via the streaming protocol.

    Reads the status line, then the "<files> <bytes>" header, then each
    "<name>\\0<size>" entry followed by its raw revlog data, writing every
    file straight into the local store.  Aborts on any error response.

    Returns len(self.heads()) + 1 so callers always see a non-zero
    "something changed" result.
    '''
    fp = remote.stream_out()
    l = fp.readline()
    try:
        resp = int(l)
    except ValueError:
        raise util.UnexpectedOutput(
            _('Unexpected response from remote server:'), l)
    if resp == 1:
        raise util.Abort(_('operation forbidden by server'))
    elif resp == 2:
        raise util.Abort(_('locking the remote repository failed'))
    elif resp != 0:
        raise util.Abort(_('the server sent an unknown error code'))
    self.ui.status(_('streaming all changes\n'))
    l = fp.readline()
    try:
        total_files, total_bytes = map(int, l.split(' ', 1))
    # fix: "except ValueError, TypeError:" bound TypeError as the
    # exception *variable* instead of catching it; use a tuple
    except (ValueError, TypeError):
        raise util.UnexpectedOutput(
            _('Unexpected response from remote server:'), l)
    self.ui.status(_('%d files to transfer, %s of data\n') %
                   (total_files, util.bytecount(total_bytes)))
    start = time.time()
    for i in xrange(total_files):
        # XXX doesn't support '\n' or '\r' in filenames
        l = fp.readline()
        try:
            name, size = l.split('\0', 1)
            size = int(size)
        except (ValueError, TypeError):
            raise util.UnexpectedOutput(
                _('Unexpected response from remote server:'), l)
        self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
        ofp = self.sopener(name, 'w')
        # filechunkiter stops after exactly `size` bytes, leaving fp
        # positioned at the start of the next file entry
        for chunk in util.filechunkiter(fp, limit=size):
            ofp.write(chunk)
        ofp.close()
    elapsed = time.time() - start
    if elapsed <= 0:
        # guard against a zero/negative clock delta before dividing
        elapsed = 0.001
    self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                   (util.bytecount(total_bytes), elapsed,
                    util.bytecount(total_bytes / elapsed)))
    # drop cached state so the freshly written store is re-read
    self.invalidate()
    return len(self.heads()) + 1
2073
2085
def clone(self, remote, heads=None, stream=False):
    '''clone remote repository.

    keyword arguments:
    heads: list of revs to clone (forces use of pull)
    stream: use streaming clone if possible'''

    # fix: avoid a mutable default argument ([]); None means "all heads"
    # and is normalized here so downstream behavior is unchanged
    if heads is None:
        heads = []

    # now, all clients that can request uncompressed clones can
    # read repo formats supported by all servers that can serve
    # them.

    # if revlog format changes, client will have to check version
    # and format flags on "stream" capability, and use
    # uncompressed only if compatible.

    if stream and not heads and remote.capable('stream'):
        return self.stream_in(remote)
    return self.pull(remote, heads)
2092
2104
2093 # used to avoid circular references so destructors work
2105 # used to avoid circular references so destructors work
def aftertrans(files):
    """Return a callback that performs the queued (src, dest) renames.

    The pairs are copied into tuples up front, so the caller's list can
    be mutated or dropped without affecting the deferred renames.
    """
    pending = [tuple(pair) for pair in files]
    def a():
        for source, destination in pending:
            util.rename(source, destination)
    return a
2100
2112
def instance(ui, path, create):
    """Build a localrepository for *path*, stripping any 'file:' scheme."""
    repopath = util.drop_scheme('file', path)
    return localrepository(ui, repopath, create)
2103
2115
def islocal(path):
    """Report that this repository type is always local.

    *path* is accepted only for interface compatibility and is ignored.
    """
    return True
General Comments 0
You need to be logged in to leave comments. Login now