transaction: support multiple, separate transactions...
Henrik Stuart
r8072:ecf77954 default
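
With this change, transaction() only nests into a previous transaction
while that transaction is still running; once it has been closed or
aborted, a fresh, separate transaction is opened instead of nesting into
the finished one. A minimal sketch of the resulting behavior
(hypothetical usage, not from this changeset; assumes an open
localrepository repo held under repo.lock(), with hook and journal
details elided):

    tr1 = repo.transaction()    # first top-level transaction
    # ... write revlog data through tr1 ...
    tr1.close()                 # tr1 is finished (no longer running)

    # The weakref in repo._transref may still resolve to tr1 here.
    # Before this change that was enough to return tr1.nest(); with the
    # running() check, a second, separate transaction is created.
    tr2 = repo.transaction()
    tr2.close()

    # Nesting still works while a transaction is genuinely in progress:
    tr3 = repo.transaction()
    inner = repo.transaction() # returns tr3.nest(), not a new journal
    inner.close()              # nested close defers to the outermost
    tr3.close()                # outermost close commits the journal
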
@@ -1,2167 +1,2168 @@
# localrepo.py - read/write repository class for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms
# of the GNU General Public License, incorporated herein by reference.

from node import bin, hex, nullid, nullrev, short
from i18n import _
import repo, changegroup
import changelog, dirstate, filelog, manifest, context, weakref
import lock, transaction, stat, errno, ui, store, encoding
import os, time, util, extensions, hook, inspect, error
import match as match_
import merge as merge_

class localrepository(repo.repository):
    capabilities = util.set(('lookup', 'changegroupsubset'))
    supported = ('revlogv1', 'store', 'fncache')

    def __init__(self, parentui, path=None, create=0):
        repo.repository.__init__(self)
        self.root = os.path.realpath(path)
        self.path = os.path.join(self.root, ".hg")
        self.origroot = path
        self.opener = util.opener(self.path)
        self.wopener = util.opener(self.root)

        if not os.path.isdir(self.path):
            if create:
                if not os.path.exists(path):
                    os.mkdir(path)
                os.mkdir(self.path)
                requirements = ["revlogv1"]
                if parentui.configbool('format', 'usestore', True):
                    os.mkdir(os.path.join(self.path, "store"))
                    requirements.append("store")
                    if parentui.configbool('format', 'usefncache', True):
                        requirements.append("fncache")
                # create an invalid changelog
                self.opener("00changelog.i", "a").write(
                    '\0\0\0\2' # represents revlogv2
                    ' dummy changelog to prevent using the old repo layout'
                )
                reqfile = self.opener("requires", "w")
                for r in requirements:
                    reqfile.write("%s\n" % r)
                reqfile.close()
            else:
                raise error.RepoError(_("repository %s not found") % path)
        elif create:
            raise error.RepoError(_("repository %s already exists") % path)
        else:
            # find requirements
            requirements = []
            try:
                requirements = self.opener("requires").read().splitlines()
                for r in requirements:
                    if r not in self.supported:
                        raise error.RepoError(_("requirement '%s' not supported") % r)
            except IOError, inst:
                if inst.errno != errno.ENOENT:
                    raise

        self.store = store.store(requirements, self.path, util.opener)
        self.spath = self.store.path
        self.sopener = self.store.opener
        self.sjoin = self.store.join
        self.opener.createmode = self.store.createmode

        self.ui = ui.ui(parentui=parentui)
        try:
            self.ui.readconfig(self.join("hgrc"), self.root)
            extensions.loadall(self.ui)
        except IOError:
            pass

        self.tagscache = None
        self._tagstypecache = None
        self.branchcache = None
        self._ubranchcache = None # UTF-8 version of branchcache
        self._branchcachetip = None
        self.nodetagscache = None
        self.filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

    def __getattr__(self, name):
        if name == 'changelog':
            self.changelog = changelog.changelog(self.sopener)
            if 'HG_PENDING' in os.environ:
                p = os.environ['HG_PENDING']
                if p.startswith(self.root):
                    self.changelog.readpending('00changelog.i.a')
            self.sopener.defversion = self.changelog.version
            return self.changelog
        if name == 'manifest':
            self.changelog
            self.manifest = manifest.manifest(self.sopener)
            return self.manifest
        if name == 'dirstate':
            self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
            return self.dirstate
        else:
            raise AttributeError(name)

    def __getitem__(self, changeid):
        if changeid == None:
            return context.workingctx(self)
        return context.changectx(self, changeid)

    def __nonzero__(self):
        return True

    def __len__(self):
        return len(self.changelog)

    def __iter__(self):
        for i in xrange(len(self)):
            yield i

    def url(self):
        return 'file:' + self.root

    def hook(self, name, throw=False, **args):
        return hook.hook(self.ui, self, name, throw, **args)

    tag_disallowed = ':\r\n'

    def _tag(self, names, node, message, local, user, date, parent=None,
             extra={}):
        use_dirstate = parent is None

        if isinstance(names, str):
            allchars = names
            names = (names,)
        else:
            allchars = ''.join(names)
        for c in self.tag_disallowed:
            if c in allchars:
                raise util.Abort(_('%r cannot be used in a tag name') % c)

        for name in names:
            self.hook('pretag', throw=True, node=hex(node), tag=name,
                      local=local)

        def writetags(fp, names, munge, prevtags):
            fp.seek(0, 2)
            if prevtags and prevtags[-1] != '\n':
                fp.write('\n')
            for name in names:
                m = munge and munge(name) or name
                if self._tagstypecache and name in self._tagstypecache:
                    old = self.tagscache.get(name, nullid)
                    fp.write('%s %s\n' % (hex(old), m))
                fp.write('%s %s\n' % (hex(node), m))
            fp.close()

        prevtags = ''
        if local:
            try:
                fp = self.opener('localtags', 'r+')
            except IOError:
                fp = self.opener('localtags', 'a')
            else:
                prevtags = fp.read()

            # local tags are stored in the current charset
            writetags(fp, names, None, prevtags)
            for name in names:
                self.hook('tag', node=hex(node), tag=name, local=local)
            return

        if use_dirstate:
            try:
                fp = self.wfile('.hgtags', 'rb+')
            except IOError:
                fp = self.wfile('.hgtags', 'ab')
            else:
                prevtags = fp.read()
        else:
            try:
                prevtags = self.filectx('.hgtags', parent).data()
            except error.LookupError:
                pass
            fp = self.wfile('.hgtags', 'wb')
            if prevtags:
                fp.write(prevtags)

        # committed tags are stored in UTF-8
        writetags(fp, names, encoding.fromlocal, prevtags)

        if use_dirstate and '.hgtags' not in self.dirstate:
            self.add(['.hgtags'])

        tagnode = self.commit(['.hgtags'], message, user, date, p1=parent,
                              extra=extra)

        for name in names:
            self.hook('tag', node=hex(node), tag=name, local=local)

        return tagnode

    def tag(self, names, node, message, local, user, date):
        '''tag a revision with one or more symbolic names.

        names is a list of strings or, when adding a single tag, names may be a
        string.

        if local is True, the tags are stored in a per-repository file.
        otherwise, they are stored in the .hgtags file, and a new
        changeset is committed with the change.

        keyword arguments:

        local: whether to store tags in non-version-controlled file
        (default False)

        message: commit message to use if committing

        user: name of user to use if committing

        date: date tuple to use if committing'''

        for x in self.status()[:5]:
            if '.hgtags' in x:
                raise util.Abort(_('working copy of .hgtags is changed '
                                   '(please commit .hgtags manually)'))

        self.tags() # instantiate the cache
        self._tag(names, node, message, local, user, date)

    def tags(self):
        '''return a mapping of tag to node'''
        if self.tagscache:
            return self.tagscache

        globaltags = {}
        tagtypes = {}

        def readtags(lines, fn, tagtype):
            filetags = {}
            count = 0

            def warn(msg):
                self.ui.warn(_("%s, line %s: %s\n") % (fn, count, msg))

            for l in lines:
                count += 1
                if not l:
                    continue
                s = l.split(" ", 1)
                if len(s) != 2:
                    warn(_("cannot parse entry"))
                    continue
                node, key = s
                key = encoding.tolocal(key.strip()) # stored in UTF-8
                try:
                    bin_n = bin(node)
                except TypeError:
                    warn(_("node '%s' is not well formed") % node)
                    continue
                if bin_n not in self.changelog.nodemap:
                    warn(_("tag '%s' refers to unknown node") % key)
                    continue

                h = []
                if key in filetags:
                    n, h = filetags[key]
                    h.append(n)
                filetags[key] = (bin_n, h)

            for k, nh in filetags.iteritems():
                if k not in globaltags:
                    globaltags[k] = nh
                    tagtypes[k] = tagtype
                    continue

                # we prefer the global tag if:
                #  it supercedes us OR
                #  mutual supercedes and it has a higher rank
                # otherwise we win because we're tip-most
                an, ah = nh
                bn, bh = globaltags[k]
                if (bn != an and an in bh and
                    (bn not in ah or len(bh) > len(ah))):
                    an = bn
                ah.extend([n for n in bh if n not in ah])
                globaltags[k] = an, ah
                tagtypes[k] = tagtype

        # read the tags file from each head, ending with the tip
        f = None
        for rev, node, fnode in self._hgtagsnodes():
            f = (f and f.filectx(fnode) or
                 self.filectx('.hgtags', fileid=fnode))
            readtags(f.data().splitlines(), f, "global")

        try:
            data = encoding.fromlocal(self.opener("localtags").read())
            # localtags are stored in the local character set
            # while the internal tag table is stored in UTF-8
            readtags(data.splitlines(), "localtags", "local")
        except IOError:
            pass

        self.tagscache = {}
        self._tagstypecache = {}
        for k, nh in globaltags.iteritems():
            n = nh[0]
            if n != nullid:
                self.tagscache[k] = n
            self._tagstypecache[k] = tagtypes[k]
        self.tagscache['tip'] = self.changelog.tip()
        return self.tagscache

    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        self.tags()

        return self._tagstypecache.get(tagname)

    def _hgtagsnodes(self):
        heads = self.heads()
        heads.reverse()
        last = {}
        ret = []
        for node in heads:
            c = self[node]
            rev = c.rev()
            try:
                fnode = c.filenode('.hgtags')
            except error.LookupError:
                continue
            ret.append((rev, node, fnode))
            if fnode in last:
                ret[last[fnode]] = None
            last[fnode] = len(ret) - 1
        return [item for item in ret if item]

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        l = []
        for t, n in self.tags().iteritems():
            try:
                r = self.changelog.rev(n)
            except:
                r = -2 # sort to the beginning of the list if unknown
            l.append((r, t, n))
        return [(t, n) for r, t, n in util.sort(l)]

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self.nodetagscache:
            self.nodetagscache = {}
            for t, n in self.tags().iteritems():
                self.nodetagscache.setdefault(n, []).append(t)
        return self.nodetagscache.get(node, [])

    def _branchtags(self, partial, lrev):
        # TODO: rename this function?
        tiprev = len(self) - 1
        if lrev != tiprev:
            self._updatebranchcache(partial, lrev+1, tiprev+1)
            self._writebranchcache(partial, self.changelog.tip(), tiprev)

        return partial

    def _branchheads(self):
        tip = self.changelog.tip()
        if self.branchcache is not None and self._branchcachetip == tip:
            return self.branchcache

        oldtip = self._branchcachetip
        self._branchcachetip = tip
        if self.branchcache is None:
            self.branchcache = {} # avoid recursion in changectx
        else:
            self.branchcache.clear() # keep using the same dict
        if oldtip is None or oldtip not in self.changelog.nodemap:
            partial, last, lrev = self._readbranchcache()
        else:
            lrev = self.changelog.rev(oldtip)
            partial = self._ubranchcache

        self._branchtags(partial, lrev)
        # this private cache holds all heads (not just tips)
        self._ubranchcache = partial

        # the branch cache is stored on disk as UTF-8, but in the local
        # charset internally
        for k, v in partial.iteritems():
            self.branchcache[encoding.tolocal(k)] = v
        return self.branchcache


    def branchtags(self):
        '''return a dict where branch names map to the tipmost head of
        the branch, open heads come before closed'''
        bt = {}
        for bn, heads in self._branchheads().iteritems():
            head = None
            for i in range(len(heads)-1, -1, -1):
                h = heads[i]
                if 'close' not in self.changelog.read(h)[5]:
                    head = h
                    break
            # no open heads were found
            if head is None:
                head = heads[-1]
            bt[bn] = head
        return bt


    def _readbranchcache(self):
        partial = {}
        try:
            f = self.opener("branchheads.cache")
            lines = f.read().split('\n')
            f.close()
        except (IOError, OSError):
            return {}, nullid, nullrev

        try:
            last, lrev = lines.pop(0).split(" ", 1)
            last, lrev = bin(last), int(lrev)
            if lrev >= len(self) or self[lrev].node() != last:
                # invalidate the cache
                raise ValueError('invalidating branch cache (tip differs)')
            for l in lines:
                if not l: continue
                node, label = l.split(" ", 1)
                partial.setdefault(label.strip(), []).append(bin(node))
        except KeyboardInterrupt:
            raise
        except Exception, inst:
            if self.ui.debugflag:
                self.ui.warn(str(inst), '\n')
            partial, last, lrev = {}, nullid, nullrev
        return partial, last, lrev

    def _writebranchcache(self, branches, tip, tiprev):
        try:
            f = self.opener("branchheads.cache", "w", atomictemp=True)
            f.write("%s %s\n" % (hex(tip), tiprev))
            for label, nodes in branches.iteritems():
                for node in nodes:
                    f.write("%s %s\n" % (hex(node), label))
            f.rename()
        except (IOError, OSError):
            pass

    def _updatebranchcache(self, partial, start, end):
        for r in xrange(start, end):
            c = self[r]
            b = c.branch()
            bheads = partial.setdefault(b, [])
            bheads.append(c.node())
            for p in c.parents():
                pn = p.node()
                if pn in bheads:
                    bheads.remove(pn)

    def lookup(self, key):
        if isinstance(key, int):
            return self.changelog.node(key)
        elif key == '.':
            return self.dirstate.parents()[0]
        elif key == 'null':
            return nullid
        elif key == 'tip':
            return self.changelog.tip()
        n = self.changelog._match(key)
        if n:
            return n
        if key in self.tags():
            return self.tags()[key]
        if key in self.branchtags():
            return self.branchtags()[key]
        n = self.changelog._partialmatch(key)
        if n:
            return n
        try:
            if len(key) == 20:
                key = hex(key)
        except:
            pass
        raise error.RepoError(_("unknown revision '%s'") % key)

    def local(self):
        return True

    def join(self, f):
        return os.path.join(self.path, f)

    def wjoin(self, f):
        return os.path.join(self.root, f)

    def rjoin(self, f):
        return os.path.join(self.root, util.pconvert(f))

    def file(self, f):
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.sopener, f)

    def changectx(self, changeid):
        return self[changeid]

    def parents(self, changeid=None):
        '''get list of changectxs for parents of changeid'''
        return self[changeid].parents()

    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
           fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def wfile(self, f, mode='r'):
        return self.wopener(f, mode)

    def _link(self, f):
        return os.path.islink(self.wjoin(f))

    def _filter(self, filter, filename, data):
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    continue
                mf = util.matcher(self.root, "", [pat], [], [])[1]
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: util.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not inspect.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self.filterpats[filter] = l

        for mf, fn, cmd in self.filterpats[filter]:
            if mf(filename):
                self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

    def wread(self, filename):
        if self._link(filename):
            data = os.readlink(self.wjoin(filename))
        else:
            data = self.wopener(filename, 'r').read()
        return self._filter("encode", filename, data)

    def wwrite(self, filename, data, flags):
        data = self._filter("decode", filename, data)
        try:
            os.unlink(self.wjoin(filename))
        except OSError:
            pass
        if 'l' in flags:
            self.wopener.symlink(data, filename)
        else:
            self.wopener(filename, 'w').write(data)
            if 'x' in flags:
                util.set_flags(self.wjoin(filename), False, True)

    def wwritedata(self, filename, data):
        return self._filter("decode", filename, data)

    def transaction(self):
-        if self._transref and self._transref():
-            return self._transref().nest()
+        tr = self._transref and self._transref() or None
+        if tr and tr.running():
+            return tr.nest()
598
599
599 # abort here if the journal already exists
600 # abort here if the journal already exists
600 if os.path.exists(self.sjoin("journal")):
601 if os.path.exists(self.sjoin("journal")):
601 raise error.RepoError(_("journal already exists - run hg recover"))
602 raise error.RepoError(_("journal already exists - run hg recover"))
602
603
603 # save dirstate for rollback
604 # save dirstate for rollback
604 try:
605 try:
605 ds = self.opener("dirstate").read()
606 ds = self.opener("dirstate").read()
606 except IOError:
607 except IOError:
607 ds = ""
608 ds = ""
608 self.opener("journal.dirstate", "w").write(ds)
609 self.opener("journal.dirstate", "w").write(ds)
609 self.opener("journal.branch", "w").write(self.dirstate.branch())
610 self.opener("journal.branch", "w").write(self.dirstate.branch())
610
611
611 renames = [(self.sjoin("journal"), self.sjoin("undo")),
612 renames = [(self.sjoin("journal"), self.sjoin("undo")),
612 (self.join("journal.dirstate"), self.join("undo.dirstate")),
613 (self.join("journal.dirstate"), self.join("undo.dirstate")),
613 (self.join("journal.branch"), self.join("undo.branch"))]
614 (self.join("journal.branch"), self.join("undo.branch"))]
614 tr = transaction.transaction(self.ui.warn, self.sopener,
615 tr = transaction.transaction(self.ui.warn, self.sopener,
615 self.sjoin("journal"),
616 self.sjoin("journal"),
616 aftertrans(renames),
617 aftertrans(renames),
617 self.store.createmode)
618 self.store.createmode)
618 self._transref = weakref.ref(tr)
619 self._transref = weakref.ref(tr)
619 return tr
620 return tr
620
621
621 def recover(self):
622 def recover(self):
622 l = self.lock()
623 l = self.lock()
623 try:
624 try:
624 if os.path.exists(self.sjoin("journal")):
625 if os.path.exists(self.sjoin("journal")):
625 self.ui.status(_("rolling back interrupted transaction\n"))
626 self.ui.status(_("rolling back interrupted transaction\n"))
626 transaction.rollback(self.sopener, self.sjoin("journal"))
627 transaction.rollback(self.sopener, self.sjoin("journal"))
627 self.invalidate()
628 self.invalidate()
628 return True
629 return True
629 else:
630 else:
630 self.ui.warn(_("no interrupted transaction available\n"))
631 self.ui.warn(_("no interrupted transaction available\n"))
631 return False
632 return False
632 finally:
633 finally:
633 del l
634 del l
634
635
635 def rollback(self):
636 def rollback(self):
636 wlock = lock = None
637 wlock = lock = None
637 try:
638 try:
638 wlock = self.wlock()
639 wlock = self.wlock()
639 lock = self.lock()
640 lock = self.lock()
640 if os.path.exists(self.sjoin("undo")):
641 if os.path.exists(self.sjoin("undo")):
641 self.ui.status(_("rolling back last transaction\n"))
642 self.ui.status(_("rolling back last transaction\n"))
642 transaction.rollback(self.sopener, self.sjoin("undo"))
643 transaction.rollback(self.sopener, self.sjoin("undo"))
643 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
644 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
644 try:
645 try:
645 branch = self.opener("undo.branch").read()
646 branch = self.opener("undo.branch").read()
646 self.dirstate.setbranch(branch)
647 self.dirstate.setbranch(branch)
647 except IOError:
648 except IOError:
648 self.ui.warn(_("Named branch could not be reset, "
649 self.ui.warn(_("Named branch could not be reset, "
649 "current branch still is: %s\n")
650 "current branch still is: %s\n")
650 % encoding.tolocal(self.dirstate.branch()))
651 % encoding.tolocal(self.dirstate.branch()))
651 self.invalidate()
652 self.invalidate()
652 self.dirstate.invalidate()
653 self.dirstate.invalidate()
653 else:
654 else:
654 self.ui.warn(_("no rollback information available\n"))
655 self.ui.warn(_("no rollback information available\n"))
655 finally:
656 finally:
656 del lock, wlock
657 del lock, wlock
657
658
658 def invalidate(self):
659 def invalidate(self):
659 for a in "changelog manifest".split():
660 for a in "changelog manifest".split():
660 if a in self.__dict__:
661 if a in self.__dict__:
661 delattr(self, a)
662 delattr(self, a)
662 self.tagscache = None
663 self.tagscache = None
663 self._tagstypecache = None
664 self._tagstypecache = None
664 self.nodetagscache = None
665 self.nodetagscache = None
665 self.branchcache = None
666 self.branchcache = None
666 self._ubranchcache = None
667 self._ubranchcache = None
667 self._branchcachetip = None
668 self._branchcachetip = None
668
669
669 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
670 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
670 try:
671 try:
671 l = lock.lock(lockname, 0, releasefn, desc=desc)
672 l = lock.lock(lockname, 0, releasefn, desc=desc)
672 except error.LockHeld, inst:
673 except error.LockHeld, inst:
673 if not wait:
674 if not wait:
674 raise
675 raise
675 self.ui.warn(_("waiting for lock on %s held by %r\n") %
676 self.ui.warn(_("waiting for lock on %s held by %r\n") %
676 (desc, inst.locker))
677 (desc, inst.locker))
677 # default to 600 seconds timeout
678 # default to 600 seconds timeout
678 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
679 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
679 releasefn, desc=desc)
680 releasefn, desc=desc)
680 if acquirefn:
681 if acquirefn:
681 acquirefn()
682 acquirefn()
682 return l
683 return l
683
684
684 def lock(self, wait=True):
685 def lock(self, wait=True):
685 if self._lockref and self._lockref():
686 if self._lockref and self._lockref():
686 return self._lockref()
687 return self._lockref()
687
688
688 l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
689 l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
689 _('repository %s') % self.origroot)
690 _('repository %s') % self.origroot)
690 self._lockref = weakref.ref(l)
691 self._lockref = weakref.ref(l)
691 return l
692 return l
692
693
693 def wlock(self, wait=True):
694 def wlock(self, wait=True):
694 if self._wlockref and self._wlockref():
695 if self._wlockref and self._wlockref():
695 return self._wlockref()
696 return self._wlockref()
696
697
697 l = self._lock(self.join("wlock"), wait, self.dirstate.write,
698 l = self._lock(self.join("wlock"), wait, self.dirstate.write,
698 self.dirstate.invalidate, _('working directory of %s') %
699 self.dirstate.invalidate, _('working directory of %s') %
699 self.origroot)
700 self.origroot)
700 self._wlockref = weakref.ref(l)
701 self._wlockref = weakref.ref(l)
701 return l
702 return l
702
703
703 def filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
704 def filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
704 """
705 """
705 commit an individual file as part of a larger transaction
706 commit an individual file as part of a larger transaction
706 """
707 """
707
708
708 fn = fctx.path()
709 fn = fctx.path()
709 t = fctx.data()
710 t = fctx.data()
710 fl = self.file(fn)
711 fl = self.file(fn)
711 fp1 = manifest1.get(fn, nullid)
712 fp1 = manifest1.get(fn, nullid)
712 fp2 = manifest2.get(fn, nullid)
713 fp2 = manifest2.get(fn, nullid)
713
714
714 meta = {}
715 meta = {}
715 cp = fctx.renamed()
716 cp = fctx.renamed()
716 if cp and cp[0] != fn:
717 if cp and cp[0] != fn:
717 # Mark the new revision of this file as a copy of another
718 # Mark the new revision of this file as a copy of another
718 # file. This copy data will effectively act as a parent
719 # file. This copy data will effectively act as a parent
719 # of this new revision. If this is a merge, the first
720 # of this new revision. If this is a merge, the first
720 # parent will be the nullid (meaning "look up the copy data")
721 # parent will be the nullid (meaning "look up the copy data")
721 # and the second one will be the other parent. For example:
722 # and the second one will be the other parent. For example:
722 #
723 #
723 # 0 --- 1 --- 3 rev1 changes file foo
724 # 0 --- 1 --- 3 rev1 changes file foo
724 # \ / rev2 renames foo to bar and changes it
725 # \ / rev2 renames foo to bar and changes it
725 # \- 2 -/ rev3 should have bar with all changes and
726 # \- 2 -/ rev3 should have bar with all changes and
726 # should record that bar descends from
727 # should record that bar descends from
727 # bar in rev2 and foo in rev1
728 # bar in rev2 and foo in rev1
728 #
729 #
729 # this allows this merge to succeed:
730 # this allows this merge to succeed:
730 #
731 #
731 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
732 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
732 # \ / merging rev3 and rev4 should use bar@rev2
733 # \ / merging rev3 and rev4 should use bar@rev2
733 # \- 2 --- 4 as the merge base
734 # \- 2 --- 4 as the merge base
734 #
735 #
735
736
736 cf = cp[0]
737 cf = cp[0]
737 cr = manifest1.get(cf)
738 cr = manifest1.get(cf)
738 nfp = fp2
739 nfp = fp2
739
740
740 if manifest2: # branch merge
741 if manifest2: # branch merge
741 if fp2 == nullid or cr is None: # copied on remote side
742 if fp2 == nullid or cr is None: # copied on remote side
742 if cf in manifest2:
743 if cf in manifest2:
743 cr = manifest2[cf]
744 cr = manifest2[cf]
744 nfp = fp1
745 nfp = fp1
745
746
746 # find source in nearest ancestor if we've lost track
747 # find source in nearest ancestor if we've lost track
747 if not cr:
748 if not cr:
748 self.ui.debug(_(" %s: searching for copy revision for %s\n") %
749 self.ui.debug(_(" %s: searching for copy revision for %s\n") %
749 (fn, cf))
750 (fn, cf))
750 for a in self['.'].ancestors():
751 for a in self['.'].ancestors():
751 if cf in a:
752 if cf in a:
752 cr = a[cf].filenode()
753 cr = a[cf].filenode()
753 break
754 break
754
755
755 self.ui.debug(_(" %s: copy %s:%s\n") % (fn, cf, hex(cr)))
756 self.ui.debug(_(" %s: copy %s:%s\n") % (fn, cf, hex(cr)))
756 meta["copy"] = cf
757 meta["copy"] = cf
757 meta["copyrev"] = hex(cr)
758 meta["copyrev"] = hex(cr)
758 fp1, fp2 = nullid, nfp
759 fp1, fp2 = nullid, nfp
759 elif fp2 != nullid:
760 elif fp2 != nullid:
760 # is one parent an ancestor of the other?
761 # is one parent an ancestor of the other?
761 fpa = fl.ancestor(fp1, fp2)
762 fpa = fl.ancestor(fp1, fp2)
762 if fpa == fp1:
763 if fpa == fp1:
763 fp1, fp2 = fp2, nullid
764 fp1, fp2 = fp2, nullid
764 elif fpa == fp2:
765 elif fpa == fp2:
765 fp2 = nullid
766 fp2 = nullid
766
767
767 # is the file unmodified from the parent? report existing entry
768 # is the file unmodified from the parent? report existing entry
768 if fp2 == nullid and not fl.cmp(fp1, t) and not meta:
769 if fp2 == nullid and not fl.cmp(fp1, t) and not meta:
769 return fp1
770 return fp1
770
771
771 changelist.append(fn)
772 changelist.append(fn)
772 return fl.add(t, meta, tr, linkrev, fp1, fp2)
773 return fl.add(t, meta, tr, linkrev, fp1, fp2)
773
774
774 def rawcommit(self, files, text, user, date, p1=None, p2=None, extra={}):
775 def rawcommit(self, files, text, user, date, p1=None, p2=None, extra={}):
775 if p1 is None:
776 if p1 is None:
776 p1, p2 = self.dirstate.parents()
777 p1, p2 = self.dirstate.parents()
777 return self.commit(files=files, text=text, user=user, date=date,
778 return self.commit(files=files, text=text, user=user, date=date,
778 p1=p1, p2=p2, extra=extra, empty_ok=True)
779 p1=p1, p2=p2, extra=extra, empty_ok=True)
779
780
780 def commit(self, files=None, text="", user=None, date=None,
781 def commit(self, files=None, text="", user=None, date=None,
781 match=None, force=False, force_editor=False,
782 match=None, force=False, force_editor=False,
782 p1=None, p2=None, extra={}, empty_ok=False):
783 p1=None, p2=None, extra={}, empty_ok=False):
783 wlock = lock = None
784 wlock = lock = None
784 if extra.get("close"):
785 if extra.get("close"):
785 force = True
786 force = True
786 if files:
787 if files:
787 files = util.unique(files)
788 files = util.unique(files)
788 try:
789 try:
789 wlock = self.wlock()
790 wlock = self.wlock()
790 lock = self.lock()
791 lock = self.lock()
791 use_dirstate = (p1 is None) # not rawcommit
792 use_dirstate = (p1 is None) # not rawcommit
792
793
793 if use_dirstate:
794 if use_dirstate:
794 p1, p2 = self.dirstate.parents()
795 p1, p2 = self.dirstate.parents()
795 update_dirstate = True
796 update_dirstate = True
796
797
797 if (not force and p2 != nullid and
798 if (not force and p2 != nullid and
798 (match and (match.files() or match.anypats()))):
799 (match and (match.files() or match.anypats()))):
799 raise util.Abort(_('cannot partially commit a merge '
800 raise util.Abort(_('cannot partially commit a merge '
800 '(do not specify files or patterns)'))
801 '(do not specify files or patterns)'))
801
802
802 if files:
803 if files:
803 modified, removed = [], []
804 modified, removed = [], []
804 for f in files:
805 for f in files:
805 s = self.dirstate[f]
806 s = self.dirstate[f]
806 if s in 'nma':
807 if s in 'nma':
807 modified.append(f)
808 modified.append(f)
808 elif s == 'r':
809 elif s == 'r':
809 removed.append(f)
810 removed.append(f)
810 else:
811 else:
811 self.ui.warn(_("%s not tracked!\n") % f)
812 self.ui.warn(_("%s not tracked!\n") % f)
812 changes = [modified, [], removed, [], []]
813 changes = [modified, [], removed, [], []]
813 else:
814 else:
814 changes = self.status(match=match)
815 changes = self.status(match=match)
815 else:
816 else:
816 p1, p2 = p1, p2 or nullid
817 p1, p2 = p1, p2 or nullid
817 update_dirstate = (self.dirstate.parents()[0] == p1)
818 update_dirstate = (self.dirstate.parents()[0] == p1)
818 changes = [files, [], [], [], []]
819 changes = [files, [], [], [], []]
819
820
820 ms = merge_.mergestate(self)
821 ms = merge_.mergestate(self)
821 for f in changes[0]:
822 for f in changes[0]:
822 if f in ms and ms[f] == 'u':
823 if f in ms and ms[f] == 'u':
823 raise util.Abort(_("unresolved merge conflicts "
824 raise util.Abort(_("unresolved merge conflicts "
824 "(see hg resolve)"))
825 "(see hg resolve)"))
825 wctx = context.workingctx(self, (p1, p2), text, user, date,
826 wctx = context.workingctx(self, (p1, p2), text, user, date,
826 extra, changes)
827 extra, changes)
827 r = self._commitctx(wctx, force, force_editor, empty_ok,
828 r = self._commitctx(wctx, force, force_editor, empty_ok,
828 use_dirstate, update_dirstate)
829 use_dirstate, update_dirstate)
829 ms.reset()
830 ms.reset()
830 return r
831 return r
831
832
832 finally:
833 finally:
833 del lock, wlock
834 del lock, wlock
834
835
835 def commitctx(self, ctx):
836 def commitctx(self, ctx):
836 """Add a new revision to current repository.
837 """Add a new revision to current repository.
837
838
838 Revision information is passed in the context.memctx argument.
839 Revision information is passed in the context.memctx argument.
839 commitctx() does not touch the working directory.
840 commitctx() does not touch the working directory.
840 """
841 """
841 wlock = lock = None
842 wlock = lock = None
842 try:
843 try:
843 wlock = self.wlock()
844 wlock = self.wlock()
844 lock = self.lock()
845 lock = self.lock()
845 return self._commitctx(ctx, force=True, force_editor=False,
846 return self._commitctx(ctx, force=True, force_editor=False,
846 empty_ok=True, use_dirstate=False,
847 empty_ok=True, use_dirstate=False,
847 update_dirstate=False)
848 update_dirstate=False)
848 finally:
849 finally:
849 del lock, wlock
850 del lock, wlock
850
851
851 def _commitctx(self, wctx, force=False, force_editor=False, empty_ok=False,
852 def _commitctx(self, wctx, force=False, force_editor=False, empty_ok=False,
852 use_dirstate=True, update_dirstate=True):
853 use_dirstate=True, update_dirstate=True):
853 tr = None
854 tr = None
854 valid = 0 # don't save the dirstate if this isn't set
855 valid = 0 # don't save the dirstate if this isn't set
855 try:
856 try:
856 commit = util.sort(wctx.modified() + wctx.added())
857 commit = util.sort(wctx.modified() + wctx.added())
857 remove = wctx.removed()
858 remove = wctx.removed()
858 extra = wctx.extra().copy()
859 extra = wctx.extra().copy()
859 branchname = extra['branch']
860 branchname = extra['branch']
860 user = wctx.user()
861 user = wctx.user()
861 text = wctx.description()
862 text = wctx.description()
862
863
863 p1, p2 = [p.node() for p in wctx.parents()]
864 p1, p2 = [p.node() for p in wctx.parents()]
864 c1 = self.changelog.read(p1)
865 c1 = self.changelog.read(p1)
865 c2 = self.changelog.read(p2)
866 c2 = self.changelog.read(p2)
866 m1 = self.manifest.read(c1[0]).copy()
867 m1 = self.manifest.read(c1[0]).copy()
867 m2 = self.manifest.read(c2[0])
868 m2 = self.manifest.read(c2[0])
868
869
869 if use_dirstate:
870 if use_dirstate:
870 oldname = c1[5].get("branch") # stored in UTF-8
871 oldname = c1[5].get("branch") # stored in UTF-8
871 if (not commit and not remove and not force and p2 == nullid
872 if (not commit and not remove and not force and p2 == nullid
872 and branchname == oldname):
873 and branchname == oldname):
873 self.ui.status(_("nothing changed\n"))
874 self.ui.status(_("nothing changed\n"))
874 return None
875 return None
875
876
876 xp1 = hex(p1)
877 xp1 = hex(p1)
877 if p2 == nullid: xp2 = ''
878 if p2 == nullid: xp2 = ''
878 else: xp2 = hex(p2)
879 else: xp2 = hex(p2)
879
880
880 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
881 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
881
882
882 tr = self.transaction()
883 tr = self.transaction()
883 trp = weakref.proxy(tr)
884 trp = weakref.proxy(tr)
884
885
885 # check in files
886 # check in files
886 new = {}
887 new = {}
887 changed = []
888 changed = []
888 linkrev = len(self)
889 linkrev = len(self)
889 for f in commit:
890 for f in commit:
890 self.ui.note(f + "\n")
891 self.ui.note(f + "\n")
891 try:
892 try:
892 fctx = wctx.filectx(f)
893 fctx = wctx.filectx(f)
893 newflags = fctx.flags()
894 newflags = fctx.flags()
894 new[f] = self.filecommit(fctx, m1, m2, linkrev, trp, changed)
895 new[f] = self.filecommit(fctx, m1, m2, linkrev, trp, changed)
895 if ((not changed or changed[-1] != f) and
896 if ((not changed or changed[-1] != f) and
896 m2.get(f) != new[f]):
897 m2.get(f) != new[f]):
897 # mention the file in the changelog if some
898 # mention the file in the changelog if some
898 # flag changed, even if there was no content
899 # flag changed, even if there was no content
899 # change.
900 # change.
900 if m1.flags(f) != newflags:
901 if m1.flags(f) != newflags:
901 changed.append(f)
902 changed.append(f)
902 m1.set(f, newflags)
903 m1.set(f, newflags)
903 if use_dirstate:
904 if use_dirstate:
904 self.dirstate.normal(f)
905 self.dirstate.normal(f)
905
906
906 except (OSError, IOError):
907 except (OSError, IOError):
907 if use_dirstate:
908 if use_dirstate:
908 self.ui.warn(_("trouble committing %s!\n") % f)
909 self.ui.warn(_("trouble committing %s!\n") % f)
909 raise
910 raise
910 else:
911 else:
911 remove.append(f)
912 remove.append(f)
912
913
913 updated, added = [], []
914 updated, added = [], []
914 for f in util.sort(changed):
915 for f in util.sort(changed):
915 if f in m1 or f in m2:
916 if f in m1 or f in m2:
916 updated.append(f)
917 updated.append(f)
917 else:
918 else:
918 added.append(f)
919 added.append(f)
919
920
920 # update manifest
921 # update manifest
921 m1.update(new)
922 m1.update(new)
922 removed = [f for f in util.sort(remove) if f in m1 or f in m2]
            removed = [f for f in util.sort(remove) if f in m1 or f in m2]
            removed1 = []

            for f in removed:
                if f in m1:
                    del m1[f]
                    removed1.append(f)
            mn = self.manifest.add(m1, trp, linkrev, c1[0], c2[0],
                                   (new, removed1))

            # add changeset
            if (not empty_ok and not text) or force_editor:
                edittext = []
                if text:
                    edittext.append(text)
                edittext.append("")
                edittext.append("") # Empty line between message and comments.
                edittext.append(_("HG: Enter commit message."
                                  " Lines beginning with 'HG:' are removed."))
                edittext.append("HG: --")
                edittext.append("HG: user: %s" % user)
                if p2 != nullid:
                    edittext.append("HG: branch merge")
                if branchname:
                    edittext.append("HG: branch '%s'"
                                    % encoding.tolocal(branchname))
                edittext.extend(["HG: added %s" % f for f in added])
                edittext.extend(["HG: changed %s" % f for f in updated])
                edittext.extend(["HG: removed %s" % f for f in removed])
                if not added and not updated and not removed:
                    edittext.append("HG: no files changed")
                edittext.append("")
                # run editor in the repository root
                olddir = os.getcwd()
                os.chdir(self.root)
                text = self.ui.edit("\n".join(edittext), user)
                os.chdir(olddir)

            lines = [line.rstrip() for line in text.rstrip().splitlines()]
            while lines and not lines[0]:
                del lines[0]
            if not lines and use_dirstate:
                raise util.Abort(_("empty commit message"))
            text = '\n'.join(lines)

            self.changelog.delayupdate()
            n = self.changelog.add(mn, changed + removed, text, trp, p1, p2,
                                   user, wctx.date(), extra)
            p = lambda: self.changelog.writepending() and self.root or ""
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2, pending=p)
            self.changelog.finalize(trp)
            tr.close()

            if self.branchcache:
                self.branchtags()

            if use_dirstate or update_dirstate:
                self.dirstate.setparents(n)
                if use_dirstate:
                    for f in removed:
                        self.dirstate.forget(f)
                valid = 1 # our dirstate updates are complete

            self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
            return n
        finally:
            if not valid: # don't save our updated dirstate
                self.dirstate.invalidate()
            del tr

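    # Editorial note (not in the original source): the 'pending' callable
    # passed to the 'pretxncommit' hook above flushes the delayed changelog
    # entry so that hook processes can see the not-yet-committed revision.
    # Mercurial exposes this to hooks through the HG_PENDING environment
    # variable; a configured hook such as, hypothetically,
    #
    #   [hooks]
    #   pretxncommit.check = hg log -r $HG_NODE --template '{desc}\n'
    #
    # can then inspect the pending commit while the transaction is open.
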
    def walk(self, match, node=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function
        '''
        return self[node].walk(match)

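    # Illustrative sketch (editorial, not in the original source): walking
    # the working directory with an always-true matcher, built with the same
    # match_.always() helper that status() uses below; 'repo' is assumed to
    # be a localrepository instance.
    #
    #   m = match_.always(repo.root, repo.getcwd())
    #   for abs in repo.walk(m):
    #       repo.ui.write("%s\n" % abs)
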
    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False):
        """return status of files between two nodes or node and working directory

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.
        """

        def mfmatches(ctx):
            mf = ctx.manifest().copy()
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        if isinstance(node1, context.changectx):
            ctx1 = node1
        else:
            ctx1 = self[node1]
        if isinstance(node2, context.changectx):
            ctx2 = node2
        else:
            ctx2 = self[node2]

        working = ctx2.rev() is None
        parentworking = working and ctx1 == self['.']
        match = match or match_.always(self.root, self.getcwd())
        listignored, listclean, listunknown = ignored, clean, unknown

        # load earliest manifest first for caching reasons
        if not working and ctx2.rev() < ctx1.rev():
            ctx2.manifest()

        if not parentworking:
            def bad(f, msg):
                if f not in ctx1:
                    self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
                return False
            match.bad = bad

        if working: # we need to scan the working dir
            s = self.dirstate.status(match, listignored, listclean, listunknown)
            cmp, modified, added, removed, deleted, unknown, ignored, clean = s

            # check for any possibly clean files
            if parentworking and cmp:
                fixup = []
                # do a full compare of any files that might have changed
                for f in cmp:
                    if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
                        or ctx1[f].cmp(ctx2[f].data())):
                        modified.append(f)
                    else:
                        fixup.append(f)

                if listclean:
                    clean += fixup

                # update dirstate for files that are actually clean
                if fixup:
                    wlock = None
                    try:
                        try:
                            wlock = self.wlock(False)
                            for f in fixup:
                                self.dirstate.normal(f)
                        except error.LockError:
                            pass
                    finally:
                        del wlock

        if not parentworking:
            mf1 = mfmatches(ctx1)
            if working:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                mf2 = mfmatches(self['.'])
                for f in cmp + modified + added:
                    mf2[f] = None
                    mf2.set(f, ctx2.flags(f))
                for f in removed:
                    if f in mf2:
                        del mf2[f]
            else:
                # we are comparing two revisions
                deleted, unknown, ignored = [], [], []
                mf2 = mfmatches(ctx2)

            modified, added, clean = [], [], []
            for fn in mf2:
                if fn in mf1:
                    if (mf1.flags(fn) != mf2.flags(fn) or
                        (mf1[fn] != mf2[fn] and
                         (mf2[fn] or ctx1[fn].cmp(ctx2[fn].data())))):
                        modified.append(fn)
                    elif listclean:
                        clean.append(fn)
                    del mf1[fn]
                else:
                    added.append(fn)
            removed = mf1.keys()

        r = modified, added, removed, deleted, unknown, ignored, clean
        [l.sort() for l in r]
        return r

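    # Illustrative sketch (editorial, not in the original source): status()
    # returns a 7-tuple of sorted file lists, which callers unpack like so:
    #
    #   (modified, added, removed, deleted,
    #    unknown, ignored, clean) = repo.status(node1='.', node2=None,
    #                                           clean=True)
    #   for f in modified:
    #       repo.ui.write("M %s\n" % f)
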
    def add(self, list):
        wlock = self.wlock()
        try:
            rejected = []
            for f in list:
                p = self.wjoin(f)
                try:
                    st = os.lstat(p)
                except:
                    self.ui.warn(_("%s does not exist!\n") % f)
                    rejected.append(f)
                    continue
                if st.st_size > 10000000:
                    self.ui.warn(_("%s: files over 10MB may cause memory and"
                                   " performance problems\n"
                                   "(use 'hg revert %s' to unadd the file)\n")
                                 % (f, f))
                if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
                    self.ui.warn(_("%s not added: only files and symlinks "
                                   "supported currently\n") % f)
                    rejected.append(f)
                elif self.dirstate[f] in 'amn':
                    self.ui.warn(_("%s already tracked!\n") % f)
                elif self.dirstate[f] == 'r':
                    self.dirstate.normallookup(f)
                else:
                    self.dirstate.add(f)
            return rejected
        finally:
            del wlock

    def forget(self, list):
        wlock = self.wlock()
        try:
            for f in list:
                if self.dirstate[f] != 'a':
                    self.ui.warn(_("%s not added!\n") % f)
                else:
                    self.dirstate.forget(f)
        finally:
            del wlock

    def remove(self, list, unlink=False):
        wlock = None
        try:
            if unlink:
                for f in list:
                    try:
                        util.unlink(self.wjoin(f))
                    except OSError, inst:
                        if inst.errno != errno.ENOENT:
                            raise
            wlock = self.wlock()
            for f in list:
                if unlink and os.path.exists(self.wjoin(f)):
                    self.ui.warn(_("%s still exists!\n") % f)
                elif self.dirstate[f] == 'a':
                    self.dirstate.forget(f)
                elif f not in self.dirstate:
                    self.ui.warn(_("%s not tracked!\n") % f)
                else:
                    self.dirstate.remove(f)
        finally:
            del wlock

    def undelete(self, list):
        wlock = None
        try:
            manifests = [self.manifest.read(self.changelog.read(p)[0])
                         for p in self.dirstate.parents() if p != nullid]
            wlock = self.wlock()
            for f in list:
                if self.dirstate[f] != 'r':
                    self.ui.warn(_("%s not removed!\n") % f)
                else:
                    m = f in manifests[0] and manifests[0] or manifests[1]
                    t = self.file(f).read(m[f])
                    self.wwrite(f, t, m.flags(f))
                    self.dirstate.normal(f)
        finally:
            del wlock

    def copy(self, source, dest):
        wlock = None
        try:
            p = self.wjoin(dest)
            if not (os.path.exists(p) or os.path.islink(p)):
                self.ui.warn(_("%s does not exist!\n") % dest)
            elif not (os.path.isfile(p) or os.path.islink(p)):
                self.ui.warn(_("copy failed: %s is not a file or a "
                               "symbolic link\n") % dest)
            else:
                wlock = self.wlock()
                if self.dirstate[dest] in '?r':
                    self.dirstate.add(dest)
                self.dirstate.copy(source, dest)
        finally:
            del wlock

    def heads(self, start=None, closed=True):
        heads = self.changelog.heads(start)
        def display(head):
            if closed:
                return True
            extras = self.changelog.read(head)[5]
            return ('close' not in extras)
        # sort the output in rev descending order
        heads = [(-self.changelog.rev(h), h) for h in heads if display(h)]
        return [n for (r, n) in util.sort(heads)]

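    # Editorial note (not in the original source): negating the revision
    # number before sorting yields heads in descending revision order, so
    # heads()[0] is the highest-revision (most recent) head:
    #
    #   tipmost = repo.heads()[0]
    #   open_heads = repo.heads(closed=False)  # skip closed branch heads
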
    def branchheads(self, branch=None, start=None, closed=True):
        if branch is None:
            branch = self[None].branch()
        branches = self._branchheads()
        if branch not in branches:
            return []
        bheads = branches[branch]
        # the cache returns heads ordered lowest to highest
        bheads.reverse()
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            bheads = self.changelog.nodesbetween([start], bheads)[2]
        if not closed:
            bheads = [h for h in bheads if
                      ('close' not in self.changelog.read(h)[5])]
        return bheads

    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while 1:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

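    # Worked example (editorial, not in the original source): between()
    # samples each top..bottom chain at exponentially growing distances,
    # appending the current node whenever the step counter i reaches f
    # (1, 2, 4, 8, ...). For a linear chain n9 -> n8 -> ... -> n0 and the
    # pair (n9, n0), the returned list is [n8, n7, n5, n1] - the nodes 1,
    # 2, 4 and 8 steps below the top. This spacing is what lets the
    # discovery code below narrow a branch with O(log n) 'between' requests.
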
    def findincoming(self, remote, base=None, heads=None, force=False):
        """Return list of roots of the subsets of missing nodes from remote

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side and that no child of a node of base exists
        in both remote and self.
        Furthermore base will be updated to include the nodes that exist
        in self and remote but where no children exist in self and remote.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads.

        All the ancestors of base are in self and in remote.
        All the descendants of the list returned are missing in self.
        (and so we know that the rest of the nodes are missing in remote, see
        outgoing)
        """
        return self.findcommonincoming(remote, base, heads, force)[1]

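    # Illustrative sketch (editorial, not in the original source): the
    # discovery entry points are typically driven the way pull() drives
    # them further down; 'other' is assumed to be a repository peer.
    #
    #   common, fetch, rheads = repo.findcommonincoming(other)
    #   if fetch:                  # roots of the missing subsets
    #       cg = other.changegroupsubset(fetch, rheads, 'pull')
    #       repo.addchangegroup(cg, 'pull', other.url())
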
    def findcommonincoming(self, remote, base=None, heads=None, force=False):
        """Return a tuple (common, missing roots, heads) used to identify
        missing nodes from remote.

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side and that no child of a node of base exists
        in both remote and self.
        Furthermore base will be updated to include the nodes that exist
        in self and remote but where no children exist in self and remote.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads.

        All the ancestors of base are in self and in remote.
        """
        m = self.changelog.nodemap
        search = []
        fetch = {}
        seen = {}
        seenbranch = {}
        if base is None:
            base = {}

        if not heads:
            heads = remote.heads()

        if self.changelog.tip() == nullid:
            base[nullid] = 1
            if heads != [nullid]:
                return [nullid], [nullid], list(heads)
            return [nullid], [], []

        # assume we're closer to the tip than the root
        # and start by examining the heads
        self.ui.status(_("searching for changes\n"))

        unknown = []
        for h in heads:
            if h not in m:
                unknown.append(h)
            else:
                base[h] = 1

        heads = unknown
        if not unknown:
            return base.keys(), [], []

        req = dict.fromkeys(unknown)
        reqcnt = 0

        # search through remote branches
        # a 'branch' here is a linear segment of history, with four parts:
        # head, root, first parent, second parent
        # (a branch always has two parents (or none) by definition)
        unknown = remote.branches(unknown)
        while unknown:
            r = []
            while unknown:
                n = unknown.pop(0)
                if n[0] in seen:
                    continue

                self.ui.debug(_("examining %s:%s\n")
                              % (short(n[0]), short(n[1])))
                if n[0] == nullid: # found the end of the branch
                    pass
                elif n in seenbranch:
                    self.ui.debug(_("branch already found\n"))
                    continue
                elif n[1] and n[1] in m: # do we know the base?
                    self.ui.debug(_("found incomplete branch %s:%s\n")
                                  % (short(n[0]), short(n[1])))
                    search.append(n[0:2]) # schedule branch range for scanning
                    seenbranch[n] = 1
                else:
                    if n[1] not in seen and n[1] not in fetch:
                        if n[2] in m and n[3] in m:
                            self.ui.debug(_("found new changeset %s\n") %
                                          short(n[1]))
                            fetch[n[1]] = 1 # earliest unknown
                        for p in n[2:4]:
                            if p in m:
                                base[p] = 1 # latest known

                    for p in n[2:4]:
                        if p not in req and p not in m:
                            r.append(p)
                            req[p] = 1
                seen[n[0]] = 1

            if r:
                reqcnt += 1
                self.ui.debug(_("request %d: %s\n") %
                              (reqcnt, " ".join(map(short, r))))
                for p in xrange(0, len(r), 10):
                    for b in remote.branches(r[p:p+10]):
                        self.ui.debug(_("received %s:%s\n") %
                                      (short(b[0]), short(b[1])))
                        unknown.append(b)

        # do binary search on the branches we found
        while search:
            newsearch = []
            reqcnt += 1
            for n, l in zip(search, remote.between(search)):
                l.append(n[1])
                p = n[0]
                f = 1
                for i in l:
                    self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
                    if i in m:
                        if f <= 2:
                            self.ui.debug(_("found new branch changeset %s\n") %
                                          short(p))
                            fetch[p] = 1
                            base[i] = 1
                        else:
                            self.ui.debug(_("narrowed branch search to %s:%s\n")
                                          % (short(p), short(i)))
                            newsearch.append((p, i))
                        break
                    p, f = i, f * 2
            search = newsearch

        # sanity check our fetch list
        for f in fetch.keys():
            if f in m:
                raise error.RepoError(_("already have changeset ")
                                      + short(f[:4]))

        if base.keys() == [nullid]:
            if force:
                self.ui.warn(_("warning: repository is unrelated\n"))
            else:
                raise util.Abort(_("repository is unrelated"))

        self.ui.debug(_("found new changesets starting at ") +
                      " ".join([short(f) for f in fetch]) + "\n")

        self.ui.debug(_("%d total queries\n") % reqcnt)

        return base.keys(), fetch.keys(), heads

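    # Worked example (editorial, not in the original source): suppose the
    # remote reports a branch segment whose head is unknown locally but
    # whose base n[1] is known; the segment is queued on 'search' and the
    # loop above asks the remote to sample it with between(). Because
    # between() returns nodes 1, 2, 4, 8, ... steps below the head, each
    # round either pins the earliest unknown node (when f <= 2) or narrows
    # the span to a single (unknown, known) window, so a segment of length
    # n costs only about log2(n) 'between' round trips.
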
    def findoutgoing(self, remote, base=None, heads=None, force=False):
        """Return list of nodes that are roots of subsets not in remote

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads, and return a second element which
        contains all remote heads which get new children.
        """
        if base is None:
            base = {}
            self.findincoming(remote, base, heads, force=force)

        self.ui.debug(_("common changesets up to ")
                      + " ".join(map(short, base.keys())) + "\n")

        remain = dict.fromkeys(self.changelog.nodemap)

        # prune everything remote has from the tree
        del remain[nullid]
        remove = base.keys()
        while remove:
            n = remove.pop(0)
            if n in remain:
                del remain[n]
                for p in self.changelog.parents(n):
                    remove.append(p)

        # find every node whose parents have been pruned
        subset = []
        # find every remote head that will get new children
        updated_heads = {}
        for n in remain:
            p1, p2 = self.changelog.parents(n)
            if p1 not in remain and p2 not in remain:
                subset.append(n)
            if heads:
                if p1 in heads:
                    updated_heads[p1] = True
                if p2 in heads:
                    updated_heads[p2] = True

        # this is the set of all roots we have to push
        if heads:
            return subset, updated_heads.keys()
        else:
            return subset

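    # Illustrative sketch (editorial, not in the original source): prepush()
    # below drives this API with a shared 'common' dict, so the incoming
    # probe and the outgoing computation reuse one discovery pass:
    #
    #   common = {}
    #   remote_heads = other.heads()
    #   inc = repo.findincoming(other, common, remote_heads, force=False)
    #   update, updated_heads = repo.findoutgoing(other, common, remote_heads)
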
    def pull(self, remote, heads=None, force=False):
        lock = self.lock()
        try:
            common, fetch, rheads = self.findcommonincoming(remote, heads=heads,
                                                            force=force)
            if fetch == [nullid]:
                self.ui.status(_("requesting all changes\n"))

            if not fetch:
                self.ui.status(_("no changes found\n"))
                return 0

            if heads is None and remote.capable('changegroupsubset'):
                heads = rheads

            if heads is None:
                cg = remote.changegroup(fetch, 'pull')
            else:
                if not remote.capable('changegroupsubset'):
                    raise util.Abort(_("Partial pull cannot be done because "
                                       "other repository doesn't support "
                                       "changegroupsubset."))
                cg = remote.changegroupsubset(fetch, heads, 'pull')
            return self.addchangegroup(cg, 'pull', remote.url())
        finally:
            del lock

    def push(self, remote, force=False, revs=None):
        # there are two ways to push to remote repo:
        #
        # addchangegroup assumes local user can lock remote
        # repo (local filesystem, old ssh servers).
        #
        # unbundle assumes local user cannot lock remote repo (new ssh
        # servers, http servers).

        if remote.capable('unbundle'):
            return self.push_unbundle(remote, force, revs)
        return self.push_addchangegroup(remote, force, revs)

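    # Illustrative sketch (editorial, not in the original source): pushing
    # only the changesets reachable from a chosen revision; repo.lookup()
    # resolves a revision symbol to a changelog node.
    #
    #   node = repo.lookup('tip')
    #   repo.push(other, force=False, revs=[node])
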
    def prepush(self, remote, force, revs):
        common = {}
        remote_heads = remote.heads()
        inc = self.findincoming(remote, common, remote_heads, force=force)

        update, updated_heads = self.findoutgoing(remote, common, remote_heads)
        if revs is not None:
            msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
        else:
            bases, heads = update, self.changelog.heads()

        if not bases:
            self.ui.status(_("no changes found\n"))
            return None, 1
        elif not force:
            # check if we're creating new remote heads
            # to be a remote head after push, node must be either
            # - unknown locally
            # - a local outgoing head descended from update
            # - a remote head that's known locally and not
            #   ancestral to an outgoing head

            warn = 0

            if remote_heads == [nullid]:
                warn = 0
            elif not revs and len(heads) > len(remote_heads):
                warn = 1
            else:
                newheads = list(heads)
                for r in remote_heads:
                    if r in self.changelog.nodemap:
                        desc = self.changelog.heads(r, heads)
                        l = [h for h in heads if h in desc]
                        if not l:
                            newheads.append(r)
                    else:
                        newheads.append(r)
                if len(newheads) > len(remote_heads):
                    warn = 1

            if warn:
                self.ui.warn(_("abort: push creates new remote heads!\n"))
                self.ui.status(_("(did you forget to merge?"
                                 " use push -f to force)\n"))
                return None, 0
            elif inc:
                self.ui.warn(_("note: unsynced remote changes!\n"))

        if revs is None:
            # use the fast path, no race possible on push
            cg = self._changegroup(common.keys(), 'push')
        else:
            cg = self.changegroupsubset(update, revs, 'push')
        return cg, remote_heads

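    # Worked example (editorial, not in the original source): if the remote
    # has the single head A and the local history adds B and C, both
    # descended from A, the loop above finds every remote head covered but
    # counts len(newheads) == 2 > len(remote_heads) == 1, so the push is
    # refused with "push creates new remote heads!" unless force is given.
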
    def push_addchangegroup(self, remote, force, revs):
        lock = remote.lock()
        try:
            ret = self.prepush(remote, force, revs)
            if ret[0] is not None:
                cg, remote_heads = ret
                return remote.addchangegroup(cg, 'push', self.url())
            return ret[1]
        finally:
            del lock

    def push_unbundle(self, remote, force, revs):
        # local repo finds heads on server, finds out what revs it
        # must push. once revs transferred, if server finds it has
        # different heads (someone else won commit/push race), server
        # aborts.

        ret = self.prepush(remote, force, revs)
        if ret[0] is not None:
            cg, remote_heads = ret
            if force:
                remote_heads = ['force']
            return remote.unbundle(cg, remote_heads, 'push')
        return ret[1]

    def changegroupinfo(self, nodes, source):
        if self.ui.verbose or source == 'bundle':
            self.ui.status(_("%d changesets found\n") % len(nodes))
        if self.ui.debugflag:
            self.ui.debug(_("list of changesets:\n"))
            for node in nodes:
                self.ui.debug("%s\n" % hex(node))

    def changegroupsubset(self, bases, heads, source, extranodes=None):
        """This function generates a changegroup consisting of all the nodes
        that are descendants of any of the bases, and ancestors of any of
        the heads.

        It is fairly complex as determining which filenodes and which
        manifest nodes need to be included for the changeset to be complete
        is non-trivial.

        Another wrinkle is doing the reverse, figuring out which changeset in
        the changegroup a particular filenode or manifestnode belongs to.

        The caller can specify some nodes that must be included in the
        changegroup using the extranodes argument. It should be a dict
        where the keys are the filenames (or 1 for the manifest), and the
        values are lists of (node, linknode) tuples, where node is a wanted
        node and linknode is the changelog node that should be transmitted as
        the linkrev.
        """

        if extranodes is None:
            # can we go through the fast path?
            heads.sort()
            allheads = self.heads()
            allheads.sort()
            if heads == allheads:
                common = []
                # parents of bases are known from both sides
                for n in bases:
                    for p in self.changelog.parents(n):
                        if p != nullid:
                            common.append(p)
                return self._changegroup(common, source)

        self.hook('preoutgoing', throw=True, source=source)

        # Set up some initial variables
        # Make it easy to refer to self.changelog
        cl = self.changelog
        # msng is short for missing - compute the list of changesets in this
        # changegroup.
        msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
        self.changegroupinfo(msng_cl_lst, source)
        # Some bases may turn out to be superfluous, and some heads may be
        # too. nodesbetween will return the minimal set of bases and heads
        # necessary to re-create the changegroup.

        # Known heads are the list of heads that it is assumed the recipient
        # of this changegroup will know about.
        knownheads = {}
        # We assume that all parents of bases are known heads.
        for n in bases:
            for p in cl.parents(n):
                if p != nullid:
                    knownheads[p] = 1
        knownheads = knownheads.keys()
        if knownheads:
            # Now that we know what heads are known, we can compute which
            # changesets are known. The recipient must know about all
            # changesets required to reach the known heads from the null
            # changeset.
            has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
            junk = None
            # Transform the list into an ersatz set.
            has_cl_set = dict.fromkeys(has_cl_set)
        else:
            # If there were no known heads, the recipient cannot be assumed to
            # know about any changesets.
            has_cl_set = {}

        # Make it easy to refer to self.manifest
        mnfst = self.manifest
        # We don't know which manifests are missing yet
        msng_mnfst_set = {}
        # Nor do we know which filenodes are missing.
        msng_filenode_set = {}

        junk = mnfst.index[len(mnfst) - 1] # Get around a bug in lazyindex
        junk = None

        # A changeset always belongs to itself, so the changenode lookup
        # function for a changenode is identity.
        def identity(x):
            return x

        # A function generating function. Sets up an environment for the
        # inner function.
        def cmp_by_rev_func(revlog):
            # Compare two nodes by their revision number in the environment's
            # revision history. Since the revision number both represents the
            # most efficient order to read the nodes in, and represents a
            # topological sorting of the nodes, this function is often useful.
            def cmp_by_rev(a, b):
                return cmp(revlog.rev(a), revlog.rev(b))
            return cmp_by_rev

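        # Editorial note (not in the original source): the returned
        # comparator plugs straight into Python 2's list.sort(), as the
        # manifest and filenode code below does, e.g.:
        #
        #   nodes.sort(cmp_by_rev_func(cl))   # oldest revision first
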
1698 # If we determine that a particular file or manifest node must be a
1699 # If we determine that a particular file or manifest node must be a
1699 # node that the recipient of the changegroup will already have, we can
1700 # node that the recipient of the changegroup will already have, we can
1700 # also assume the recipient will have all the parents. This function
1701 # also assume the recipient will have all the parents. This function
1701 # prunes them from the set of missing nodes.
1702 # prunes them from the set of missing nodes.
1702 def prune_parents(revlog, hasset, msngset):
1703 def prune_parents(revlog, hasset, msngset):
1703 haslst = hasset.keys()
1704 haslst = hasset.keys()
1704 haslst.sort(cmp_by_rev_func(revlog))
1705 haslst.sort(cmp_by_rev_func(revlog))
1705 for node in haslst:
1706 for node in haslst:
1706 parentlst = [p for p in revlog.parents(node) if p != nullid]
1707 parentlst = [p for p in revlog.parents(node) if p != nullid]
1707 while parentlst:
1708 while parentlst:
1708 n = parentlst.pop()
1709 n = parentlst.pop()
1709 if n not in hasset:
1710 if n not in hasset:
1710 hasset[n] = 1
1711 hasset[n] = 1
1711 p = [p for p in revlog.parents(n) if p != nullid]
1712 p = [p for p in revlog.parents(n) if p != nullid]
1712 parentlst.extend(p)
1713 parentlst.extend(p)
1713 for n in hasset:
1714 for n in hasset:
1714 msngset.pop(n, None)
1715 msngset.pop(n, None)
1715
1716
1716 # This is a function generating function used to set up an environment
1717 # This is a function generating function used to set up an environment
1717 # for the inner function to execute in.
1718 # for the inner function to execute in.
1718 def manifest_and_file_collector(changedfileset):
1719 def manifest_and_file_collector(changedfileset):
1719 # This is an information gathering function that gathers
1720 # This is an information gathering function that gathers
1720 # information from each changeset node that goes out as part of
1721 # information from each changeset node that goes out as part of
1721 # the changegroup. The information gathered is a list of which
1722 # the changegroup. The information gathered is a list of which
1722 # manifest nodes are potentially required (the recipient may
1723 # manifest nodes are potentially required (the recipient may
1723 # already have them) and total list of all files which were
1724 # already have them) and total list of all files which were
1724 # changed in any changeset in the changegroup.
1725 # changed in any changeset in the changegroup.
1725 #
1726 #
1726 # We also remember the first changenode we saw any manifest
1727 # We also remember the first changenode we saw any manifest
1727 # referenced by so we can later determine which changenode 'owns'
1728 # referenced by so we can later determine which changenode 'owns'
1728 # the manifest.
1729 # the manifest.
1729 def collect_manifests_and_files(clnode):
1730 def collect_manifests_and_files(clnode):
1730 c = cl.read(clnode)
1731 c = cl.read(clnode)
1731 for f in c[3]:
1732 for f in c[3]:
1732 # This is to make sure we only have one instance of each
1733 # This is to make sure we only have one instance of each
1733 # filename string for each filename.
1734 # filename string for each filename.
1734 changedfileset.setdefault(f, f)
1735 changedfileset.setdefault(f, f)
1735 msng_mnfst_set.setdefault(c[0], clnode)
1736 msng_mnfst_set.setdefault(c[0], clnode)
1736 return collect_manifests_and_files
1737 return collect_manifests_and_files
1737
1738
1738 # Figure out which manifest nodes (of the ones we think might be part
1739 # Figure out which manifest nodes (of the ones we think might be part
1739 # of the changegroup) the recipient must know about and remove them
1740 # of the changegroup) the recipient must know about and remove them
1740 # from the changegroup.
1741 # from the changegroup.
1741 def prune_manifests():
1742 def prune_manifests():
1742 has_mnfst_set = {}
1743 has_mnfst_set = {}
1743 for n in msng_mnfst_set:
1744 for n in msng_mnfst_set:
1744 # If a 'missing' manifest thinks it belongs to a changenode
1745 # If a 'missing' manifest thinks it belongs to a changenode
1745 # the recipient is assumed to have, obviously the recipient
1746 # the recipient is assumed to have, obviously the recipient
1746 # must have that manifest.
1747 # must have that manifest.
1747 linknode = cl.node(mnfst.linkrev(mnfst.rev(n)))
1748 linknode = cl.node(mnfst.linkrev(mnfst.rev(n)))
1748 if linknode in has_cl_set:
1749 if linknode in has_cl_set:
1749 has_mnfst_set[n] = 1
1750 has_mnfst_set[n] = 1
1750 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1751 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1751
1752
1752 # Use the information collected in collect_manifests_and_files to say
1753 # Use the information collected in collect_manifests_and_files to say
1753 # which changenode any manifestnode belongs to.
1754 # which changenode any manifestnode belongs to.
1754 def lookup_manifest_link(mnfstnode):
1755 def lookup_manifest_link(mnfstnode):
1755 return msng_mnfst_set[mnfstnode]
1756 return msng_mnfst_set[mnfstnode]
1756
1757
1757 # A function generating function that sets up the initial environment
1758 # A function generating function that sets up the initial environment
1758 # the inner function.
1759 # the inner function.
1759 def filenode_collector(changedfiles):
1760 def filenode_collector(changedfiles):
1760 next_rev = [0]
1761 next_rev = [0]
1761 # This gathers information from each manifestnode included in the
1762 # This gathers information from each manifestnode included in the
1762 # changegroup about which filenodes the manifest node references
1763 # changegroup about which filenodes the manifest node references
1763 # so we can include those in the changegroup too.
1764 # so we can include those in the changegroup too.
1764 #
1765 #
1765 # It also remembers which changenode each filenode belongs to. It
1766 # It also remembers which changenode each filenode belongs to. It
1766 # does this by assuming the a filenode belongs to the changenode
1767 # does this by assuming the a filenode belongs to the changenode
1767 # the first manifest that references it belongs to.
1768 # the first manifest that references it belongs to.
1768 def collect_msng_filenodes(mnfstnode):
1769 def collect_msng_filenodes(mnfstnode):
1769 r = mnfst.rev(mnfstnode)
1770 r = mnfst.rev(mnfstnode)
1770 if r == next_rev[0]:
1771 if r == next_rev[0]:
1771 # If the last rev we looked at was the one just previous,
1772 # If the last rev we looked at was the one just previous,
1772 # we only need to see a diff.
1773 # we only need to see a diff.
1773 deltamf = mnfst.readdelta(mnfstnode)
1774 deltamf = mnfst.readdelta(mnfstnode)
1774 # For each line in the delta
1775 # For each line in the delta
1775 for f, fnode in deltamf.iteritems():
1776 for f, fnode in deltamf.iteritems():
1776 f = changedfiles.get(f, None)
1777 f = changedfiles.get(f, None)
1777 # And if the file is in the list of files we care
1778 # And if the file is in the list of files we care
1778 # about.
1779 # about.
1779 if f is not None:
1780 if f is not None:
1780 # Get the changenode this manifest belongs to
1781 # Get the changenode this manifest belongs to
1781 clnode = msng_mnfst_set[mnfstnode]
1782 clnode = msng_mnfst_set[mnfstnode]
1782 # Create the set of filenodes for the file if
1783 # Create the set of filenodes for the file if
1783 # there isn't one already.
1784 # there isn't one already.
1784 ndset = msng_filenode_set.setdefault(f, {})
1785 ndset = msng_filenode_set.setdefault(f, {})
1785 # And set the filenode's changelog node to the
1786 # And set the filenode's changelog node to the
1786 # manifest's if it hasn't been set already.
1787 # manifest's if it hasn't been set already.
1787 ndset.setdefault(fnode, clnode)
1788 ndset.setdefault(fnode, clnode)
1788 else:
1789 else:
1789 # Otherwise we need a full manifest.
1790 # Otherwise we need a full manifest.
1790 m = mnfst.read(mnfstnode)
1791 m = mnfst.read(mnfstnode)
1791 # For every file in we care about.
1792 # For every file in we care about.
1792 for f in changedfiles:
1793 for f in changedfiles:
1793 fnode = m.get(f, None)
1794 fnode = m.get(f, None)
1794 # If it's in the manifest
1795 # If it's in the manifest
1795 if fnode is not None:
1796 if fnode is not None:
1796 # See comments above.
1797 # See comments above.
1797 clnode = msng_mnfst_set[mnfstnode]
1798 clnode = msng_mnfst_set[mnfstnode]
1798 ndset = msng_filenode_set.setdefault(f, {})
1799 ndset = msng_filenode_set.setdefault(f, {})
1799 ndset.setdefault(fnode, clnode)
1800 ndset.setdefault(fnode, clnode)
1800 # Remember the revision we hope to see next.
1801 # Remember the revision we hope to see next.
1801 next_rev[0] = r + 1
1802 next_rev[0] = r + 1
1802 return collect_msng_filenodes
1803 return collect_msng_filenodes
1803
1804
1804 # We have a list of filenodes we think we need for a file, lets remove
1805 # We have a list of filenodes we think we need for a file, lets remove
1805 # all those we now the recipient must have.
1806 # all those we now the recipient must have.
1806 def prune_filenodes(f, filerevlog):
1807 def prune_filenodes(f, filerevlog):
1807 msngset = msng_filenode_set[f]
1808 msngset = msng_filenode_set[f]
1808 hasset = {}
1809 hasset = {}
1809 # If a 'missing' filenode thinks it belongs to a changenode we
1810 # If a 'missing' filenode thinks it belongs to a changenode we
1810 # assume the recipient must have, then the recipient must have
1811 # assume the recipient must have, then the recipient must have
1811 # that filenode.
1812 # that filenode.
1812 for n in msngset:
1813 for n in msngset:
1813 clnode = cl.node(filerevlog.linkrev(filerevlog.rev(n)))
1814 clnode = cl.node(filerevlog.linkrev(filerevlog.rev(n)))
1814 if clnode in has_cl_set:
1815 if clnode in has_cl_set:
1815 hasset[n] = 1
1816 hasset[n] = 1
1816 prune_parents(filerevlog, hasset, msngset)
1817 prune_parents(filerevlog, hasset, msngset)
1817
1818
1818 # A function generator function that sets up the a context for the
1819 # A function generator function that sets up the a context for the
1819 # inner function.
1820 # inner function.
1820 def lookup_filenode_link_func(fname):
1821 def lookup_filenode_link_func(fname):
1821 msngset = msng_filenode_set[fname]
1822 msngset = msng_filenode_set[fname]
1822 # Lookup the changenode the filenode belongs to.
1823 # Lookup the changenode the filenode belongs to.
1823 def lookup_filenode_link(fnode):
1824 def lookup_filenode_link(fnode):
1824 return msngset[fnode]
1825 return msngset[fnode]
1825 return lookup_filenode_link
1826 return lookup_filenode_link
1826
1827
1827 # Add the nodes that were explicitly requested.
1828 # Add the nodes that were explicitly requested.
1828 def add_extra_nodes(name, nodes):
1829 def add_extra_nodes(name, nodes):
1829 if not extranodes or name not in extranodes:
1830 if not extranodes or name not in extranodes:
1830 return
1831 return
1831
1832
1832 for node, linknode in extranodes[name]:
1833 for node, linknode in extranodes[name]:
1833 if node not in nodes:
1834 if node not in nodes:
1834 nodes[node] = linknode
1835 nodes[node] = linknode
1835
1836
1836 # Now that we have all theses utility functions to help out and
1837 # Now that we have all theses utility functions to help out and
1837 # logically divide up the task, generate the group.
1838 # logically divide up the task, generate the group.
        def gengroup():
            # The set of changed files starts empty.
            changedfiles = {}
            # Create a changenode group generator that will call our functions
            # back to lookup the owning changenode and collect information.
            group = cl.group(msng_cl_lst, identity,
                             manifest_and_file_collector(changedfiles))
            for chnk in group:
                yield chnk

            # The list of manifests has been collected by the generator
            # calling our functions back.
            prune_manifests()
            add_extra_nodes(1, msng_mnfst_set)
            msng_mnfst_lst = msng_mnfst_set.keys()
            # Sort the manifestnodes by revision number.
            msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
                                filenode_collector(changedfiles))
            for chnk in group:
                yield chnk

            # These are no longer needed, dereference and toss the memory for
            # them.
            msng_mnfst_lst = None
            msng_mnfst_set.clear()

            if extranodes:
                for fname in extranodes:
                    if isinstance(fname, int):
                        continue
                    msng_filenode_set.setdefault(fname, {})
                    changedfiles[fname] = 1
            # Go through all our files in order sorted by name.
            for fname in util.sort(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                # Toss out the filenodes that the recipient isn't really
                # missing.
                if fname in msng_filenode_set:
                    prune_filenodes(fname, filerevlog)
                    add_extra_nodes(fname, msng_filenode_set[fname])
                    msng_filenode_lst = msng_filenode_set[fname].keys()
                else:
                    msng_filenode_lst = []
                # If any filenodes are left, generate the group for them,
                # otherwise don't bother.
                if len(msng_filenode_lst) > 0:
                    yield changegroup.chunkheader(len(fname))
                    yield fname
                    # Sort the filenodes by their revision #
                    msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
                    # Create a group generator and only pass in a changenode
                    # lookup function as we need to collect no information
                    # from filenodes.
                    group = filerevlog.group(msng_filenode_lst,
                                             lookup_filenode_link_func(fname))
                    for chnk in group:
                        yield chnk
                if fname in msng_filenode_set:
                    # Don't need this anymore, toss it to free memory.
                    del msng_filenode_set[fname]
            # Signal that no more groups are left.
            yield changegroup.closechunk()
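            # Rough sketch of the stream emitted above (each revlog group is
            # terminated by an empty chunk from the underlying group()):
            #
            #   changelog chunks ..., empty chunk
            #   manifest chunks ..., empty chunk
            #   for each changed file:
            #       chunkheader(len(fname)), fname, chunks ..., empty chunk
            #   empty chunk (the closechunk above: no more files)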

            if msng_cl_lst:
                self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)

        return util.chunkbuffer(gengroup())

    def changegroup(self, basenodes, source):
        # to avoid a race we use changegroupsubset() (issue1320)
        return self.changegroupsubset(basenodes, self.heads(), source)

    def _changegroup(self, common, source):
        """Generate a changegroup of all nodes that we have that a recipient
        doesn't.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them.

        common is the set of common nodes between remote and self"""

        self.hook('preoutgoing', throw=True, source=source)

        cl = self.changelog
        nodes = cl.findmissing(common)
        revset = dict.fromkeys([cl.rev(n) for n in nodes])
        self.changegroupinfo(nodes, source)

        def identity(x):
            return x

        def gennodelst(log):
            for r in log:
                if log.linkrev(r) in revset:
                    yield log.node(r)

        def changed_file_collector(changedfileset):
            def collect_changed_files(clnode):
                c = cl.read(clnode)
                # c[3] is the list of files touched by this changeset
                for fname in c[3]:
                    changedfileset[fname] = 1
            return collect_changed_files

        def lookuprevlink_func(revlog):
            def lookuprevlink(n):
                return cl.node(revlog.linkrev(revlog.rev(n)))
            return lookuprevlink

        def gengroup():
            # construct a list of all changed files
            changedfiles = {}

            for chnk in cl.group(nodes, identity,
                                 changed_file_collector(changedfiles)):
                yield chnk

            mnfst = self.manifest
            nodeiter = gennodelst(mnfst)
            for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
                yield chnk

            for fname in util.sort(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                nodeiter = gennodelst(filerevlog)
                nodeiter = list(nodeiter)
                if nodeiter:
                    yield changegroup.chunkheader(len(fname))
                    yield fname
                    lookup = lookuprevlink_func(filerevlog)
                    for chnk in filerevlog.group(nodeiter, lookup):
                        yield chnk

            yield changegroup.closechunk()

        if nodes:
            self.hook('outgoing', node=hex(nodes[0]), source=source)

        return util.chunkbuffer(gengroup())

    def addchangegroup(self, source, srctype, url, emptyok=False):
        """add changegroup to repo.

        return values:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - fewer heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
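        # For example: going from 1 head to 3 returns 3 - 1 + 1 = 3
        # (1 plus 2 added heads); going from 3 heads to 1 returns
        # 1 - 3 - 1 = -3 (-1 minus 2 removed heads); an unchanged head
        # count returns 1 (see the final return statements below).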
        def csmap(x):
            self.ui.debug(_("add changeset %s\n") % short(x))
            # the new changeset will be appended, so its rev is len(cl)
            return len(cl)

        def revmap(x):
            return cl.rev(x)

        if not source:
            return 0

        self.hook('prechangegroup', throw=True, source=srctype, url=url)

        changesets = files = revisions = 0

        # write changelog data to temp files so concurrent readers will not
        # see an inconsistent view
        cl = self.changelog
        cl.delayupdate()
        oldheads = len(cl.heads())

        tr = self.transaction()
        try:
            trp = weakref.proxy(tr)
            # pull off the changeset group
            self.ui.status(_("adding changesets\n"))
            cor = len(cl) - 1
            chunkiter = changegroup.chunkiter(source)
            if cl.addgroup(chunkiter, csmap, trp) is None and not emptyok:
                raise util.Abort(_("received changelog group is empty"))
            cnr = len(cl) - 1
            changesets = cnr - cor

            # pull off the manifest group
            self.ui.status(_("adding manifests\n"))
            chunkiter = changegroup.chunkiter(source)
            # no need to check for empty manifest group here:
            # if the result of the merge of 1 and 2 is the same in 3 and 4,
            # no new manifest will be created and the manifest group will
            # be empty during the pull
            self.manifest.addgroup(chunkiter, revmap, trp)

            # process the files
            self.ui.status(_("adding file changes\n"))
            while True:
                f = changegroup.getchunk(source)
                if not f:
                    break
                self.ui.debug(_("adding %s revisions\n") % f)
                fl = self.file(f)
                o = len(fl)
                chunkiter = changegroup.chunkiter(source)
                if fl.addgroup(chunkiter, revmap, trp) is None:
                    raise util.Abort(_("received file revlog group is empty"))
                revisions += len(fl) - o
                files += 1

            newheads = len(self.changelog.heads())
            heads = ""
            if oldheads and newheads != oldheads:
                heads = _(" (%+d heads)") % (newheads - oldheads)

            self.ui.status(_("added %d changesets"
                             " with %d changes to %d files%s\n")
                           % (changesets, revisions, files, heads))

            if changesets > 0:
                p = lambda: self.changelog.writepending() and self.root or ""
                self.hook('pretxnchangegroup', throw=True,
                          node=hex(self.changelog.node(cor+1)), source=srctype,
                          url=url, pending=p)

            # make changelog see real files again
            cl.finalize(trp)

            tr.close()
        finally:
            del tr

        if changesets > 0:
            # forcefully update the on-disk branch cache
            self.ui.debug(_("updating the branch cache\n"))
            self.branchtags()
            self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
                      source=srctype, url=url)

            for i in xrange(cor + 1, cnr + 1):
                self.hook("incoming", node=hex(self.changelog.node(i)),
                          source=srctype, url=url)

        # never return 0 here:
        if newheads < oldheads:
            return newheads - oldheads - 1
        else:
            return newheads - oldheads + 1


    def stream_in(self, remote):
        fp = remote.stream_out()
        l = fp.readline()
        try:
            resp = int(l)
        except ValueError:
            raise error.ResponseError(
                _('Unexpected response from remote server:'), l)
        if resp == 1:
            raise util.Abort(_('operation forbidden by server'))
        elif resp == 2:
            raise util.Abort(_('locking the remote repository failed'))
        elif resp != 0:
            raise util.Abort(_('the server sent an unknown error code'))
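        # After the status code, the remainder of the stream (as parsed
        # below) is expected to look like:
        #
        #   "<total_files> <total_bytes>\n"
        #   then, for each file:
        #     "<store path>\0<size>\n"
        #     <size> bytes of raw revlog data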
        self.ui.status(_('streaming all changes\n'))
        l = fp.readline()
        try:
            total_files, total_bytes = map(int, l.split(' ', 1))
        except (ValueError, TypeError):
            raise error.ResponseError(
                _('Unexpected response from remote server:'), l)
        self.ui.status(_('%d files to transfer, %s of data\n') %
                       (total_files, util.bytecount(total_bytes)))
        start = time.time()
        for i in xrange(total_files):
            # XXX doesn't support '\n' or '\r' in filenames
            l = fp.readline()
            try:
                name, size = l.split('\0', 1)
                size = int(size)
            except (ValueError, TypeError):
                raise error.ResponseError(
                    _('Unexpected response from remote server:'), l)
            self.ui.debug(_('adding %s (%s)\n') % (name, util.bytecount(size)))
            ofp = self.sopener(name, 'w')
            for chunk in util.filechunkiter(fp, limit=size):
                ofp.write(chunk)
            ofp.close()
        elapsed = time.time() - start
        if elapsed <= 0:
            elapsed = 0.001
        self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                       (util.bytecount(total_bytes), elapsed,
                        util.bytecount(total_bytes / elapsed)))
        self.invalidate()
        return len(self.heads()) + 1

    def clone(self, remote, heads=[], stream=False):
        '''clone remote repository.

        keyword arguments:
        heads: list of revs to clone (forces use of pull)
        stream: use streaming clone if possible'''
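        # Hypothetical caller sketch (not from this module):
        #   repo.clone(remote)               # pull-based clone
        #   repo.clone(remote, stream=True)  # stream if the server allows it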

        # now, all clients that can request uncompressed clones can
        # read repo formats supported by all servers that can serve
        # them.

        # if revlog format changes, client will have to check version
        # and format flags on "stream" capability, and use
        # uncompressed only if compatible.

        if stream and not heads and remote.capable('stream'):
            return self.stream_in(remote)
        return self.pull(remote, heads)

# used to avoid circular references so destructors work
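# (returning a plain closure over a copied list, rather than anything bound
# to the repo, keeps the transaction's after-callback from holding a
# reference back to the repository; e.g. aftertrans([('journal', 'undo')])
# yields a callable that renames 'journal' to 'undo' when run)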
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for src, dest in renamefiles:
            util.rename(src, dest)
    return a

def instance(ui, path, create):
    return localrepository(ui, util.drop_scheme('file', path), create)

def islocal(path):
    return True