/*
 *	drivers/i2o/i2o_lan.c
 *
 *	I2O LAN CLASS OSM		May 26th 2000
 *
 *	(C) Copyright 1999, 2000	University of Helsinki,
 *					Department of Computer Science
 *
 *	This code is still under development / test.
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *	Authors:	Auvo Häkkinen <Auvo.Hakkinen@cs.Helsinki.FI>
 *	Fixes:		Juha Sievänen <Juha.Sievanen@cs.Helsinki.FI>
 *			Taneli Vähäkangas <Taneli.Vahakangas@cs.Helsinki.FI>
 *			Deepak Saxena <deepak@plexity.net>
 *
 *	Tested:		in FDDI environment (using SysKonnect's DDM)
 *			in Gigabit Eth environment (using SysKonnect's DDM)
 *			in Fast Ethernet environment (using Intel 82558 DDM)
 *
 *	TODO:		tests for other LAN classes (Token Ring, Fibre Channel)
 */

#include <linux/config.h>
#include <linux/module.h>

#include <linux/pci.h>

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/fddidevice.h>
#include <linux/trdevice.h>
#include <linux/fcdevice.h>

#include <linux/skbuff.h>
#include <linux/if_arp.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/tqueue.h>
#include <asm/io.h>

#include <linux/errno.h>

#include <linux/i2o.h>
#include "i2o_lan.h"

//#define DRIVERDEBUG
#ifdef DRIVERDEBUG
#define dprintk(s, args...) printk(s, ## args)
#else
#define dprintk(s, args...)
#endif

/* The following module parameters are used as default values
 * for per interface values located in the net_device private area.
 * Private values are changed via /proc filesystem.
 */
static u32 max_buckets_out = I2O_LAN_MAX_BUCKETS_OUT;
static u32 bucket_thresh   = I2O_LAN_BUCKET_THRESH;
static u32 rx_copybreak    = I2O_LAN_RX_COPYBREAK;
static u8  tx_batch_mode   = I2O_LAN_TX_BATCH_MODE;
static u32 i2o_event_mask  = I2O_LAN_EVENT_MASK;

#define MAX_LAN_CARDS 16
static struct net_device *i2o_landevs[MAX_LAN_CARDS+1];
static int unit = -1;	  /* device unit number */

static void i2o_lan_reply(struct i2o_handler *h, struct i2o_controller *iop, struct i2o_message *m);
static void i2o_lan_send_post_reply(struct i2o_handler *h, struct i2o_controller *iop, struct i2o_message *m);
static int i2o_lan_receive_post(struct net_device *dev);
static void i2o_lan_receive_post_reply(struct i2o_handler *h, struct i2o_controller *iop, struct i2o_message *m);
static void i2o_lan_release_buckets(struct net_device *dev, u32 *msg);

static int i2o_lan_reset(struct net_device *dev);
static void i2o_lan_handle_event(struct net_device *dev, u32 *msg);

/* Structures to register handlers for the incoming replies. */

static struct i2o_handler i2o_lan_send_handler = {
	i2o_lan_send_post_reply,	// For send replies
	NULL,
	NULL,
	NULL,
	"I2O LAN OSM send",
	-1,
	I2O_CLASS_LAN
};
static int lan_send_context;

static struct i2o_handler i2o_lan_receive_handler = {
	i2o_lan_receive_post_reply,	// For receive replies
	NULL,
	NULL,
	NULL,
	"I2O LAN OSM receive",
	-1,
	I2O_CLASS_LAN
};
static int lan_receive_context;

static struct i2o_handler i2o_lan_handler = {
	i2o_lan_reply,			// For other replies
	NULL,
	NULL,
	NULL,
	"I2O LAN OSM",
	-1,
	I2O_CLASS_LAN
};
static int lan_context;

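/* Deferred bucket posting: reply handlers run in interrupt context, so
 * i2o_lan_receive_post_reply() only fills in .data below (the net_device
 * to refill) and queues this task on tq_immediate; the refill work then
 * runs from the immediate bottom half rather than in the interrupt path.
 */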
DECLARE_TASK_QUEUE(i2o_post_buckets_task);
struct tq_struct run_i2o_post_buckets_task = {
	routine: (void (*)(void *)) run_task_queue,
	data: (void *) 0
};

/* Functions to handle message failures and transaction errors:
==============================================================*/

/*
 * i2o_lan_handle_failure(): Fail bit has been set since IOP's message
 * layer cannot deliver the request to the target, or the target cannot
 * process the request.
 */
static void i2o_lan_handle_failure(struct net_device *dev, u32 *msg)
{
	struct i2o_lan_local *priv = (struct i2o_lan_local *)dev->priv;
	struct i2o_device *i2o_dev = priv->i2o_dev;
	struct i2o_controller *iop = i2o_dev->controller;

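	/* msg[7] of a failed reply holds the offset of the preserved
	 * original request frame; each simple SG element in it is three
	 * words, so the walks below advance by 3 until they hit the
	 * element with the last-element (LE) flag, bit 31, set.
	 */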
	u32 *preserved_msg = (u32*)(iop->mem_offset + msg[7]);
	u32 *sgl_elem = &preserved_msg[4];
	struct sk_buff *skb = NULL;
	u8 le_flag;

	i2o_report_status(KERN_INFO, dev->name, msg);

	/* If PacketSend failed, free sk_buffs reserved by upper layers */

	if (msg[1] >> 24 == LAN_PACKET_SEND) {
		do {
			skb = (struct sk_buff *)(sgl_elem[1]);
			dev_kfree_skb_irq(skb);

			atomic_dec(&priv->tx_out);

			le_flag = *sgl_elem >> 31;
			sgl_elem += 3;
		} while (le_flag == 0); /* Last element flag not set */

		if (netif_queue_stopped(dev))
			netif_wake_queue(dev);
	}

	/* If ReceivePost failed, free sk_buffs we have reserved */

	if (msg[1] >> 24 == LAN_RECEIVE_POST) {
		do {
			skb = (struct sk_buff *)(sgl_elem[1]);
			dev_kfree_skb_irq(skb);

			atomic_dec(&priv->buckets_out);

			le_flag = *sgl_elem >> 31;
			sgl_elem += 3;
		} while (le_flag == 0); /* Last element flag not set */
	}

	/* Release the preserved msg frame by resubmitting it as a NOP */

	preserved_msg[0] = THREE_WORD_MSG_SIZE | SGL_OFFSET_0;
	preserved_msg[1] = I2O_CMD_UTIL_NOP << 24 | HOST_TID << 12 | 0;
	preserved_msg[2] = 0;
	i2o_post_message(iop, msg[7]);
}

/*
 * i2o_lan_handle_transaction_error(): IOP or DDM has rejected the request
 * for general cause (format error, bad function code, insufficient resources,
 * etc.). We get one transaction_error for each failed transaction.
 */
static void i2o_lan_handle_transaction_error(struct net_device *dev, u32 *msg)
{
	struct i2o_lan_local *priv = (struct i2o_lan_local *)dev->priv;
	struct sk_buff *skb;

	i2o_report_status(KERN_INFO, dev->name, msg);

	/* If PacketSend was rejected, free sk_buff reserved by upper layers */

	if (msg[1] >> 24 == LAN_PACKET_SEND) {
		skb = (struct sk_buff *)(msg[3]); // TransactionContext
		dev_kfree_skb_irq(skb);
		atomic_dec(&priv->tx_out);

		if (netif_queue_stopped(dev))
			netif_wake_queue(dev);
	}

	/* If ReceivePost was rejected, free sk_buff we have reserved */

	if (msg[1] >> 24 == LAN_RECEIVE_POST) {
		skb = (struct sk_buff *)(msg[3]);
		dev_kfree_skb_irq(skb);
		atomic_dec(&priv->buckets_out);
	}
}

/*
 * i2o_lan_handle_status(): Common parts of handling an unsuccessful request
 * (status != SUCCESS).
 */
static int i2o_lan_handle_status(struct net_device *dev, u32 *msg)
{
	/* Fail bit set? */

	if (msg[0] & MSG_FAIL) {
		i2o_lan_handle_failure(dev, msg);
		return -1;
	}

	/* Message rejected for general cause? */

	if ((msg[4]>>24) == I2O_REPLY_STATUS_TRANSACTION_ERROR) {
		i2o_lan_handle_transaction_error(dev, msg);
		return -1;
	}

	/* Else have to handle it in the callback function */

	return 0;
}

/* Callback functions called from the interrupt routine:
=======================================================*/

/*
 * i2o_lan_send_post_reply(): Callback function to handle PacketSend replies.
 */
static void i2o_lan_send_post_reply(struct i2o_handler *h,
			struct i2o_controller *iop, struct i2o_message *m)
{
	u32 *msg = (u32 *)m;
	u8 unit  = (u8)(msg[2]>>16); // InitiatorContext
	struct net_device *dev = i2o_landevs[unit];
	struct i2o_lan_local *priv = (struct i2o_lan_local *)dev->priv;
	u8 trl_count  = msg[3] & 0x000000FF;

	if ((msg[4] >> 24) != I2O_REPLY_STATUS_SUCCESS) {
		if (i2o_lan_handle_status(dev, msg))
			return;
	}

#ifdef DRIVERDEBUG
	i2o_report_status(KERN_INFO, dev->name, msg);
#endif

	/* DDM has handled transmit request(s), free sk_buffs.
	 * We get a similar single-transaction reply also in error cases
	 * (except for message failures and transaction errors).
	 */
	while (trl_count) {
		dev_kfree_skb_irq((struct sk_buff *)msg[4 + trl_count]);
		dprintk(KERN_INFO "%s: tx skb freed (trl_count=%d).\n",
			dev->name, trl_count);
		atomic_dec(&priv->tx_out);
		trl_count--;
	}

	/* If priv->tx_out had reached tx_max_out, the queue was stopped */

	if (netif_queue_stopped(dev))
		netif_wake_queue(dev);
}

/*
 * i2o_lan_receive_post_reply(): Callback function to process incoming packets.
 */
static void i2o_lan_receive_post_reply(struct i2o_handler *h,
			struct i2o_controller *iop, struct i2o_message *m)
{
	u32 *msg = (u32 *)m;
	u8 unit  = (u8)(msg[2]>>16); // InitiatorContext
	struct net_device *dev = i2o_landevs[unit];

	struct i2o_lan_local *priv = (struct i2o_lan_local *)dev->priv;
	struct i2o_bucket_descriptor *bucket = (struct i2o_bucket_descriptor *)&msg[6];
	struct i2o_packet_info *packet;
	u8 trl_count = msg[3] & 0x000000FF;
	struct sk_buff *skb, *old_skb;
	unsigned long flags = 0;

	if ((msg[4] >> 24) != I2O_REPLY_STATUS_SUCCESS) {
		if (i2o_lan_handle_status(dev, msg))
			return;

		i2o_lan_release_buckets(dev, msg);
		return;
	}

#ifdef DRIVERDEBUG
	i2o_report_status(KERN_INFO, dev->name, msg);
#endif

	/* Else we are receiving incoming post. */

	while (trl_count--) {
		skb = (struct sk_buff *)bucket->context;
		packet = (struct i2o_packet_info *)bucket->packet_info;
		atomic_dec(&priv->buckets_out);

		/* Sanity checks: Any weird characteristics in bucket? */

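		/* PacketInfo flags as checked below: bit 0 = packet error,
		 * bits 2-3 = packet continues in another bucket, and bit 6
		 * is expected to be set for a single complete packet in
		 * this bucket.
		 */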
		if (packet->flags & 0x0f || !(packet->flags & 0x40)) {
			if (packet->flags & 0x01)
				printk(KERN_WARNING "%s: packet with errors, error code=0x%02x.\n",
					dev->name, packet->status & 0xff);

			/* The following shouldn't happen, unless parameters in
			 * LAN_OPERATION group are changed during the run time.
			 */
			if (packet->flags & 0x0c)
				printk(KERN_DEBUG "%s: multi-bucket packets not supported!\n",
					dev->name);

			if (!(packet->flags & 0x40))
				printk(KERN_DEBUG "%s: multiple packets in a bucket not supported!\n",
					dev->name);

			dev_kfree_skb_irq(skb);

			bucket++;
			continue;
		}

		/* Copy short packet to a new skb */

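		/* Packets below rx_copybreak are copied into a small fresh
		 * skb, so the full-sized bucket skb can be recycled via the
		 * free bucket list (i2o_fbl) instead of being reallocated.
		 */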
		if (packet->len < priv->rx_copybreak) {
			old_skb = skb;
			skb = (struct sk_buff *)dev_alloc_skb(packet->len+2);
			if (skb == NULL) {
				printk(KERN_ERR "%s: Can't allocate skb.\n", dev->name);
				return;
			}
			skb_reserve(skb, 2);
			memcpy(skb_put(skb, packet->len), old_skb->data, packet->len);

			spin_lock_irqsave(&priv->fbl_lock, flags);
			if (priv->i2o_fbl_tail < I2O_LAN_MAX_BUCKETS_OUT)
				priv->i2o_fbl[++priv->i2o_fbl_tail] = old_skb;
			else
				dev_kfree_skb_irq(old_skb);

			spin_unlock_irqrestore(&priv->fbl_lock, flags);
		} else
			skb_put(skb, packet->len);

		/* Deliver to upper layers */

		skb->dev = dev;
		skb->protocol = priv->type_trans(skb, dev);
		netif_rx(skb);

		dev->last_rx = jiffies;

		dprintk(KERN_INFO "%s: Incoming packet (%d bytes) delivered "
			"to upper level.\n", dev->name, packet->len);

		bucket++; // to next Packet Descriptor Block
	}

#ifdef DRIVERDEBUG
	if (msg[5] == 0)
		printk(KERN_INFO "%s: DDM out of buckets (priv->count = %d)!\n",
		       dev->name, atomic_read(&priv->buckets_out));
#endif

	/* If DDM has already consumed bucket_thresh buckets, post new ones */

	if (atomic_read(&priv->buckets_out) <= priv->max_buckets_out - priv->bucket_thresh) {
		run_i2o_post_buckets_task.data = (void *)dev;
		queue_task(&run_i2o_post_buckets_task, &tq_immediate);
		mark_bh(IMMEDIATE_BH);
	}

	return;
}

/*
 * i2o_lan_reply(): Callback function to handle other incoming messages
 * except PacketSend and ReceivePost replies.
 */
static void i2o_lan_reply(struct i2o_handler *h, struct i2o_controller *iop,
			  struct i2o_message *m)
{
	u32 *msg = (u32 *)m;
	u8 unit  = (u8)(msg[2]>>16); // InitiatorContext
	struct net_device *dev = i2o_landevs[unit];

	if ((msg[4] >> 24) != I2O_REPLY_STATUS_SUCCESS) {
		if (i2o_lan_handle_status(dev, msg))
			return;

		/* In other error cases just report and continue */

		i2o_report_status(KERN_INFO, dev->name, msg);
	}

#ifdef DRIVERDEBUG
	i2o_report_status(KERN_INFO, dev->name, msg);
#endif
	switch (msg[1] >> 24) {
		case LAN_RESET:
		case LAN_SUSPEND:
			/* default reply without payload */
		break;

		case I2O_CMD_UTIL_EVT_REGISTER:
		case I2O_CMD_UTIL_EVT_ACK:
			i2o_lan_handle_event(dev, msg);
		break;

		case I2O_CMD_UTIL_PARAMS_SET:
			/* default reply, results in ReplyPayload (not examined) */
			switch (msg[3] >> 16) {
			    case 1: dprintk(KERN_INFO "%s: Reply to set MAC filter mask.\n",
					dev->name);
			    break;
			    case 2: dprintk(KERN_INFO "%s: Reply to set MAC table.\n",
					dev->name);
			    break;
			    default: printk(KERN_WARNING "%s: Bad group 0x%04X\n",
					dev->name, msg[3] >> 16);
			}
		break;

		default:
			printk(KERN_ERR "%s: No handler for the reply.\n",
				dev->name);
			i2o_report_status(KERN_INFO, dev->name, msg);
	}
}

/* Functions used by the above callback functions:
=================================================*/
/*
 * i2o_lan_release_buckets(): Free unused buckets (sk_buffs).
 */
static void i2o_lan_release_buckets(struct net_device *dev, u32 *msg)
{
	struct i2o_lan_local *priv = (struct i2o_lan_local *)dev->priv;
	u8 trl_elem_size = (u8)(msg[3]>>8 & 0x000000FF);
	u8 trl_count = (u8)(msg[3] & 0x000000FF);
	u32 *pskb = &msg[6];

	while (trl_count--) {
		dprintk(KERN_DEBUG "%s: Releasing unused rx skb %p (trl_count=%d).\n",
			dev->name, (struct sk_buff*)(*pskb), trl_count+1);
		dev_kfree_skb_irq((struct sk_buff *)(*pskb));
		pskb += 1 + trl_elem_size;
		atomic_dec(&priv->buckets_out);
	}
}

/*
 * i2o_lan_handle_event(): Handle events.
 */
static void i2o_lan_handle_event(struct net_device *dev, u32 *msg)
{
	struct i2o_lan_local *priv = (struct i2o_lan_local *)dev->priv;
	struct i2o_device *i2o_dev = priv->i2o_dev;
	struct i2o_controller *iop = i2o_dev->controller;
	u32 max_evt_data_size = iop->status_block->inbound_frame_size - 5;
	struct i2o_reply {
		u32 header[4];
		u32 evt_indicator;
		u32 data[max_evt_data_size];
	} *evt = (struct i2o_reply *)msg;
	int evt_data_len = ((msg[0]>>16) - 5) * 4; /* real size in bytes */

	printk(KERN_INFO "%s: I2O event - ", dev->name);

	if (msg[1]>>24 == I2O_CMD_UTIL_EVT_ACK) {
		printk("Event acknowledgement reply.\n");
		return;
	}

	/* Else evt->function == I2O_CMD_UTIL_EVT_REGISTER */

	switch (evt->evt_indicator) {
	case I2O_EVT_IND_STATE_CHANGE:  {
		struct state_data {
			u16 status;
			u8 state;
			u8 data;
		} *evt_data = (struct state_data *)&evt->data[0];

		printk("State change 0x%08x.\n", evt->data[0]);

		/* If the DDM is in error state, recovery may be
		 * possible if status = Transmit or Receive Control
		 * Unit Inoperable.
		 */
		if (evt_data->state==0x05 && evt_data->status==0x0003)
			i2o_lan_reset(dev);
		break;
	}

	case I2O_EVT_IND_FIELD_MODIFIED: {
		u16 *work16 = (u16 *)evt->data;
		printk("Group 0x%04x, field %d changed.\n", work16[0], work16[1]);
		break;
	}

	case I2O_EVT_IND_VENDOR_EVT: {
		int i;
		printk("Vendor event:\n");
		for (i = 0; i < evt_data_len / 4; i++)
			printk("   0x%08x\n", evt->data[i]);
		break;
	}

	case I2O_EVT_IND_DEVICE_RESET:
		/* Spec 2.0 p. 6-121:
		 * The _DEVICE_RESET event should also be acknowledged
		 */
		printk("Device reset.\n");
		if (i2o_event_ack(iop, msg) < 0)
			printk("%s: Event Acknowledge timeout.\n", dev->name);
		break;

#if 0
	case I2O_EVT_IND_EVT_MASK_MODIFIED:
		printk("Event mask modified, 0x%08x.\n", evt->data[0]);
		break;

	case I2O_EVT_IND_GENERAL_WARNING:
		printk("General warning 0x%04x.\n", evt->data[0]);
		break;

	case I2O_EVT_IND_CONFIGURATION_FLAG:
		printk("Configuration requested.\n");
		break;

	case I2O_EVT_IND_CAPABILITY_CHANGE:
		printk("Capability change 0x%04x.\n", evt->data[0]);
		break;

	case I2O_EVT_IND_DEVICE_STATE:
		printk("Device state changed 0x%08x.\n", evt->data[0]);
		break;
#endif
	case I2O_LAN_EVT_LINK_DOWN:
		netif_carrier_off(dev);
		printk("Link to the physical device is lost.\n");
		break;

	case I2O_LAN_EVT_LINK_UP:
		netif_carrier_on(dev);
		printk("Link to the physical device is (re)established.\n");
		break;

	case I2O_LAN_EVT_MEDIA_CHANGE:
		printk("Media change.\n");
		break;
	default:
		printk("0x%08x. No handler.\n", evt->evt_indicator);
	}
}

/*
 * i2o_lan_receive_post(): Post buckets to receive packets.
 */
static int i2o_lan_receive_post(struct net_device *dev)
{
	struct i2o_lan_local *priv = (struct i2o_lan_local *)dev->priv;
	struct i2o_device *i2o_dev = priv->i2o_dev;
	struct i2o_controller *iop = i2o_dev->controller;
	struct sk_buff *skb;
	u32 m, *msg;
	u32 bucket_len = (dev->mtu + dev->hard_header_len);
	u32 total = priv->max_buckets_out - atomic_read(&priv->buckets_out);
	u32 bucket_count;
	u32 *sgl_elem;
	unsigned long flags;

	/* Send (total/bucket_count) separate I2O requests */

	while (total) {
		m = I2O_POST_READ32(iop);
		if (m == 0xFFFFFFFF)
			return -ETIMEDOUT;
		msg = (u32 *)(iop->mem_offset + m);

		bucket_count = (total >= priv->sgl_max) ? priv->sgl_max : total;
		total -= bucket_count;
		atomic_add(bucket_count, &priv->buckets_out);

		dprintk(KERN_INFO "%s: Sending %d buckets (size %d) to LAN DDM.\n",
			dev->name, bucket_count, bucket_len);

		/* Fill in the header */

		__raw_writel(I2O_MESSAGE_SIZE(4 + 3 * bucket_count) | SGL_OFFSET_4, msg);
		__raw_writel(LAN_RECEIVE_POST<<24 | HOST_TID<<12 | i2o_dev->lct_data.tid, msg+1);
		__raw_writel(priv->unit << 16 | lan_receive_context, msg+2);
		__raw_writel(bucket_count, msg+3);
		sgl_elem = &msg[4];

		/* Fill in the payload - contains bucket_count SGL elements */

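		/* Each bucket is one simple SG element of three words:
		 * the flag/length word, the TransactionContext (the skb
		 * pointer), and the bus address of the skb data.
		 */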
		while (bucket_count--) {
			spin_lock_irqsave(&priv->fbl_lock, flags);
			if (priv->i2o_fbl_tail >= 0)
				skb = priv->i2o_fbl[priv->i2o_fbl_tail--];
			else {
				skb = dev_alloc_skb(bucket_len + 2);
				if (skb == NULL) {
					spin_unlock_irqrestore(&priv->fbl_lock, flags);
					return -ENOMEM;
				}
				skb_reserve(skb, 2);
			}
			spin_unlock_irqrestore(&priv->fbl_lock, flags);

			__raw_writel(0x51000000 | bucket_len, sgl_elem);
			__raw_writel((u32)skb,		      sgl_elem+1);
			__raw_writel(virt_to_bus(skb->data),  sgl_elem+2);
			sgl_elem += 3;
		}

		/* set LE flag and post */
		__raw_writel(__raw_readl(sgl_elem-3) | 0x80000000, (sgl_elem-3));
		i2o_post_message(iop, m);
	}

	return 0;
}

/* Functions called from the network stack, and functions called by them:
========================================================================*/

/*
 * i2o_lan_reset(): Reset the LAN adapter into the operational state and
 *	restore it to full operation.
 */
static int i2o_lan_reset(struct net_device *dev)
{
	struct i2o_lan_local *priv = (struct i2o_lan_local *)dev->priv;
	struct i2o_device *i2o_dev = priv->i2o_dev;
	struct i2o_controller *iop = i2o_dev->controller;
	u32 msg[5];

	dprintk(KERN_INFO "%s: LAN RESET MESSAGE.\n", dev->name);
	msg[0] = FIVE_WORD_MSG_SIZE | SGL_OFFSET_0;
	msg[1] = LAN_RESET<<24 | HOST_TID<<12 | i2o_dev->lct_data.tid;
	msg[2] = priv->unit << 16 | lan_context; // InitiatorContext
	msg[3] = 0;				 // TransactionContext
	msg[4] = 0;				 // Keep posted buckets

	if (i2o_post_this(iop, msg, sizeof(msg)) < 0)
		return -ETIMEDOUT;

	return 0;
}

/*
 * i2o_lan_suspend(): Put LAN adapter into a safe, non-active state.
 *	IOP replies to any LAN class message with status error_no_data_transfer
 *	/ suspended.
 */
static int i2o_lan_suspend(struct net_device *dev)
{
	struct i2o_lan_local *priv = (struct i2o_lan_local *)dev->priv;
	struct i2o_device *i2o_dev = priv->i2o_dev;
	struct i2o_controller *iop = i2o_dev->controller;
	u32 msg[5];

	dprintk(KERN_INFO "%s: LAN SUSPEND MESSAGE.\n", dev->name);
	msg[0] = FIVE_WORD_MSG_SIZE | SGL_OFFSET_0;
	msg[1] = LAN_SUSPEND<<24 | HOST_TID<<12 | i2o_dev->lct_data.tid;
	msg[2] = priv->unit << 16 | lan_context; // InitiatorContext
	msg[3] = 0;				 // TransactionContext
	msg[4] = 1 << 16;			 // return posted buckets

	if (i2o_post_this(iop, msg, sizeof(msg)) < 0)
		return -ETIMEDOUT;

	return 0;
}

/*
 * i2o_set_ddm_parameters():
 * These settings are done to ensure proper initial values for the DDM.
 * They can be changed via the /proc file system or via a configuration utility.
 */
static void i2o_set_ddm_parameters(struct net_device *dev)
{
	struct i2o_lan_local *priv = (struct i2o_lan_local *)dev->priv;
	struct i2o_device *i2o_dev = priv->i2o_dev;
	struct i2o_controller *iop = i2o_dev->controller;
	u32 val;

	/*
	 * When PacketOrphanLimit is set to the maximum packet length,
	 * the packets will never be split into two separate buckets
	 */
	val = dev->mtu + dev->hard_header_len;
	if (i2o_set_scalar(iop, i2o_dev->lct_data.tid, 0x0004, 2, &val, sizeof(val)) < 0)
		printk(KERN_WARNING "%s: Unable to set PacketOrphanLimit.\n",
		       dev->name);
	else
		dprintk(KERN_INFO "%s: PacketOrphanLimit set to %d.\n",
			dev->name, val);

	/* When RxMaxPacketsBucket = 1, DDM puts only one packet into bucket */

	val = 1;
	if (i2o_set_scalar(iop, i2o_dev->lct_data.tid, 0x0008, 4, &val, sizeof(val)) < 0)
		printk(KERN_WARNING "%s: Unable to set RxMaxPacketsBucket.\n",
		       dev->name);
	else
		dprintk(KERN_INFO "%s: RxMaxPacketsBucket set to %d.\n",
			dev->name, val);
	return;
}

/* Functions called from the network stack:
==========================================*/

/*
 * i2o_lan_open(): Open the device to send/receive packets via
 * the network device.
 */
static int i2o_lan_open(struct net_device *dev)
{
	struct i2o_lan_local *priv = (struct i2o_lan_local *)dev->priv;
	struct i2o_device *i2o_dev = priv->i2o_dev;
	struct i2o_controller *iop = i2o_dev->controller;
	u32 mc_addr_group[64];

	MOD_INC_USE_COUNT;

	if (i2o_claim_device(i2o_dev, &i2o_lan_handler)) {
		printk(KERN_WARNING "%s: Unable to claim the I2O LAN device.\n", dev->name);
		MOD_DEC_USE_COUNT;
		return -EAGAIN;
	}
	dprintk(KERN_INFO "%s: I2O LAN device (tid=%d) claimed by LAN OSM.\n",
		dev->name, i2o_dev->lct_data.tid);

	if (i2o_event_register(iop, i2o_dev->lct_data.tid,
			       priv->unit << 16 | lan_context, 0, priv->i2o_event_mask) < 0)
		printk(KERN_WARNING "%s: Unable to set the event mask.\n", dev->name);

	i2o_lan_reset(dev);

	/* Get the max number of multicast addresses */

	if (i2o_query_scalar(iop, i2o_dev->lct_data.tid, 0x0001, -1,
			     &mc_addr_group, sizeof(mc_addr_group)) < 0 ) {
		printk(KERN_WARNING "%s: Unable to query LAN_MAC_ADDRESS group.\n", dev->name);
		MOD_DEC_USE_COUNT;
		return -EAGAIN;
	}
	priv->max_size_mc_table = mc_addr_group[8];

	/* Allocate space for the free bucket list, used to reuse ReceivePost buckets */

	priv->i2o_fbl = kmalloc(priv->max_buckets_out * sizeof(struct sk_buff *),
				GFP_KERNEL);
	if (priv->i2o_fbl == NULL) {
		MOD_DEC_USE_COUNT;
		return -ENOMEM;
	}
	priv->i2o_fbl_tail = -1;
	priv->send_active = 0;

	i2o_set_ddm_parameters(dev);
	i2o_lan_receive_post(dev);

	netif_start_queue(dev);

	return 0;
}

/*
 * i2o_lan_close(): Stop transferring.
 */
static int i2o_lan_close(struct net_device *dev)
{
	struct i2o_lan_local *priv = (struct i2o_lan_local *)dev->priv;
	struct i2o_device *i2o_dev = priv->i2o_dev;
	struct i2o_controller *iop = i2o_dev->controller;
	int ret = 0;

	netif_stop_queue(dev);
	i2o_lan_suspend(dev);

	if (i2o_event_register(iop, i2o_dev->lct_data.tid,
			       priv->unit << 16 | lan_context, 0, 0) < 0)
		printk(KERN_WARNING "%s: Unable to clear the event mask.\n",
		       dev->name);

	while (priv->i2o_fbl_tail >= 0)
		dev_kfree_skb(priv->i2o_fbl[priv->i2o_fbl_tail--]);

	kfree(priv->i2o_fbl);

	if (i2o_release_device(i2o_dev, &i2o_lan_handler)) {
		printk(KERN_WARNING "%s: Unable to unclaim I2O LAN device "
		       "(tid=%d).\n", dev->name, i2o_dev->lct_data.tid);
		ret = -EBUSY;
	}

	MOD_DEC_USE_COUNT;

	return ret;
}

/*
 * i2o_lan_tx_timeout(): Tx timeout handler.
 */
static void i2o_lan_tx_timeout(struct net_device *dev)
{
	if (!netif_queue_stopped(dev))
		netif_start_queue(dev);
}

/*
 * i2o_lan_batch_send(): Send packets in batch.
 * Both i2o_lan_sdu_send and i2o_lan_packet_send use this.
 */
static void i2o_lan_batch_send(struct net_device *dev)
{
	struct i2o_lan_local *priv = (struct i2o_lan_local *)dev->priv;
	struct i2o_controller *iop = priv->i2o_dev->controller;

	spin_lock_irq(&priv->tx_lock);
	if (priv->tx_count != 0) {
		dev->trans_start = jiffies;
		i2o_post_message(iop, priv->m);
		dprintk(KERN_DEBUG "%s: %d packets sent.\n", dev->name, priv->tx_count);
		priv->tx_count = 0;
	}
	priv->send_active = 0;
	spin_unlock_irq(&priv->tx_lock);
	MOD_DEC_USE_COUNT;
}

#ifdef CONFIG_NET_FC
/*
 * i2o_lan_sdu_send(): Send a packet, MAC header added by the DDM.
 * Must be supported by Fibre Channel, optional for Ethernet/802.3,
 * Token Ring, FDDI
 */
static int i2o_lan_sdu_send(struct sk_buff *skb, struct net_device *dev)
{
	struct i2o_lan_local *priv = (struct i2o_lan_local *)dev->priv;
	struct i2o_device *i2o_dev = priv->i2o_dev;
	struct i2o_controller *iop = i2o_dev->controller;
	int tickssofar = jiffies - dev->trans_start;
	u32 m, *msg;
	u32 *sgl_elem;

	spin_lock_irq(&priv->tx_lock);

	priv->tx_count++;
	atomic_inc(&priv->tx_out);

	/*
	 * If tx_batch_mode = 0x00 forced to immediate mode
	 * If tx_batch_mode = 0x01 forced to batch mode
	 * If tx_batch_mode = 0x10 switch automatically, current mode immediate
	 * If tx_batch_mode = 0x11 switch automatically, current mode batch
	 *	If gap between two packets is > 0 ticks, switch to immediate
	 */
	if (priv->tx_batch_mode >> 1) // switch automatically
		priv->tx_batch_mode = tickssofar ? 0x02 : 0x03;

	if (priv->tx_count == 1) {
		m = I2O_POST_READ32(iop);
		if (m == 0xFFFFFFFF) {
			spin_unlock_irq(&priv->tx_lock);
			return 1;
		}
		msg = (u32 *)(iop->mem_offset + m);
		priv->m = m;

		__raw_writel(NINE_WORD_MSG_SIZE | 1<<12 | SGL_OFFSET_4, msg);
		__raw_writel(LAN_PACKET_SEND<<24 | HOST_TID<<12 | i2o_dev->lct_data.tid, msg+1);
		__raw_writel(priv->unit << 16 | lan_send_context, msg+2); // InitiatorContext
		__raw_writel(1 << 30 | 1 << 3, msg+3);			  // TransmitControlWord
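			// bit 30: reply as soon as transmission attempt is complete
			// bit 3: Suppress CRC generation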

		__raw_writel(0xD7000000 | skb->len, msg+4);	     // MAC hdr included
		__raw_writel((u32)skb, msg+5);			     // TransactionContext
		__raw_writel(virt_to_bus(skb->data), msg+6);
		__raw_writel((u32)skb->mac.raw, msg+7);
		__raw_writel((u32)skb->mac.raw+4, msg+8);

		if ((priv->tx_batch_mode & 0x01) && !priv->send_active) {
			priv->send_active = 1;
			MOD_INC_USE_COUNT;
			if (schedule_task(&priv->i2o_batch_send_task) == 0)
				MOD_DEC_USE_COUNT;
		}
	} else {  /* Add new SGL element to the previous message frame */

		msg = (u32 *)(iop->mem_offset + priv->m);
		sgl_elem = &msg[priv->tx_count * 5 + 1];

		__raw_writel(I2O_MESSAGE_SIZE((__raw_readl(msg)>>16) + 5) | 1<<12 | SGL_OFFSET_4, msg);
		__raw_writel(__raw_readl(sgl_elem-5) & 0x7FFFFFFF, sgl_elem-5); /* clear LE flag */
		__raw_writel(0xD5000000 | skb->len, sgl_elem);
		__raw_writel((u32)skb, sgl_elem+1);
		__raw_writel(virt_to_bus(skb->data), sgl_elem+2);
		__raw_writel((u32)(skb->mac.raw), sgl_elem+3);
		__raw_writel((u32)(skb->mac.raw)+1, sgl_elem+4);
	}

	/* If tx is not in batch mode or the frame is full, send immediately */

	if (!(priv->tx_batch_mode & 0x01) || priv->tx_count == priv->sgl_max) {
		dev->trans_start = jiffies;
		i2o_post_message(iop, priv->m);
		dprintk(KERN_DEBUG "%s: %d packets sent.\n", dev->name, priv->tx_count);
		priv->tx_count = 0;
	}

	/* If the DDM's TxMaxPktOut is reached, stop the queueing layer from sending more */

	if (atomic_read(&priv->tx_out) >= priv->tx_max_out)
		netif_stop_queue(dev);

	spin_unlock_irq(&priv->tx_lock);
	return 0;
}
#endif /* CONFIG_NET_FC */

/*
 * i2o_lan_packet_send(): Send a packet as is, including the MAC header.
 *
 * Must be supported by Ethernet/802.3, Token Ring, FDDI, optional for
 * Fibre Channel
 */
static int i2o_lan_packet_send(struct sk_buff *skb, struct net_device *dev)
{
	struct i2o_lan_local *priv = (struct i2o_lan_local *)dev->priv;
	struct i2o_device *i2o_dev = priv->i2o_dev;
	struct i2o_controller *iop = i2o_dev->controller;
	int tickssofar = jiffies - dev->trans_start;
	u32 m, *msg;
	u32 *sgl_elem;

	spin_lock_irq(&priv->tx_lock);

	priv->tx_count++;
	atomic_inc(&priv->tx_out);

	/*
	 * If tx_batch_mode = 0x00 forced to immediate mode
	 * If tx_batch_mode = 0x01 forced to batch mode
	 * If tx_batch_mode = 0x10 switch automatically, current mode immediate
	 * If tx_batch_mode = 0x11 switch automatically, current mode batch
	 *	If gap between two packets is > 0 ticks, switch to immediate
	 */
	if (priv->tx_batch_mode >> 1) // switch automatically
		priv->tx_batch_mode = tickssofar ? 0x02 : 0x03;

	if (priv->tx_count == 1) {
		m = I2O_POST_READ32(iop);
		if (m == 0xFFFFFFFF) {
			spin_unlock_irq(&priv->tx_lock);
			return 1;
		}
		msg = (u32 *)(iop->mem_offset + m);
		priv->m = m;

		__raw_writel(SEVEN_WORD_MSG_SIZE | 1<<12 | SGL_OFFSET_4, msg);
		__raw_writel(LAN_PACKET_SEND<<24 | HOST_TID<<12 | i2o_dev->lct_data.tid, msg+1);
		__raw_writel(priv->unit << 16 | lan_send_context, msg+2); // InitiatorContext
		__raw_writel(1 << 30 | 1 << 3, msg+3);			  // TransmitControlWord
			// bit 30: reply as soon as transmission attempt is complete
			// bit 3: Suppress CRC generation
		__raw_writel(0xD5000000 | skb->len, msg+4);	     // MAC hdr included
		__raw_writel((u32)skb, msg+5);			     // TransactionContext
		__raw_writel(virt_to_bus(skb->data), msg+6);

		if ((priv->tx_batch_mode & 0x01) && !priv->send_active) {
			priv->send_active = 1;
			MOD_INC_USE_COUNT;
			if (schedule_task(&priv->i2o_batch_send_task) == 0)
				MOD_DEC_USE_COUNT;
		}
	} else {  /* Add new SGL element to the previous message frame */

		msg = (u32 *)(iop->mem_offset + priv->m);
		sgl_elem = &msg[priv->tx_count * 3 + 1];

		__raw_writel(I2O_MESSAGE_SIZE((__raw_readl(msg)>>16) + 3) | 1<<12 | SGL_OFFSET_4, msg);
		__raw_writel(__raw_readl(sgl_elem-3) & 0x7FFFFFFF, sgl_elem-3); /* clear LE flag */
		__raw_writel(0xD5000000 | skb->len, sgl_elem);
		__raw_writel((u32)skb, sgl_elem+1);
		__raw_writel(virt_to_bus(skb->data), sgl_elem+2);
	}

	/* If tx is in immediate mode or the frame is full, send now */

	if (!(priv->tx_batch_mode & 0x01) || priv->tx_count == priv->sgl_max) {
		dev->trans_start = jiffies;
		i2o_post_message(iop, priv->m);
		dprintk(KERN_DEBUG "%s: %d packets sent.\n", dev->name, priv->tx_count);
		priv->tx_count = 0;
	}

	/* If the DDM's TxMaxPktOut is reached, stop the queueing layer from sending more */

	if (atomic_read(&priv->tx_out) >= priv->tx_max_out)
		netif_stop_queue(dev);

	spin_unlock_irq(&priv->tx_lock);
	return 0;
}

/*
 * i2o_lan_get_stats(): Fill in the statistics.
 */
static struct net_device_stats *i2o_lan_get_stats(struct net_device *dev)
{
	struct i2o_lan_local *priv = (struct i2o_lan_local *)dev->priv;
	struct i2o_device *i2o_dev = priv->i2o_dev;
	struct i2o_controller *iop = i2o_dev->controller;
	u64 val64[16];
	u64 supported_group[4] = { 0, 0, 0, 0 };

	if (i2o_query_scalar(iop, i2o_dev->lct_data.tid, 0x0100, -1, val64,
			     sizeof(val64)) < 0)
		printk(KERN_INFO "%s: Unable to query LAN_HISTORICAL_STATS.\n", dev->name);
	else {
		dprintk(KERN_DEBUG "%s: LAN_HISTORICAL_STATS queried.\n", dev->name);
		priv->stats.gen.tx_packets = val64[0];
		priv->stats.gen.tx_bytes   = val64[1];
		priv->stats.gen.rx_packets = val64[2];
		priv->stats.gen.rx_bytes   = val64[3];
		priv->stats.gen.tx_errors  = val64[4];
		priv->stats.gen.rx_errors  = val64[5];
		priv->stats.gen.rx_dropped = val64[6];
	}

	if (i2o_query_scalar(iop, i2o_dev->lct_data.tid, 0x0180, -1,
			     &supported_group, sizeof(supported_group)) < 0)
		printk(KERN_INFO "%s: Unable to query LAN_SUPPORTED_OPTIONAL_HISTORICAL_STATS.\n", dev->name);

	if (supported_group[2]) {
		if (i2o_query_scalar(iop, i2o_dev->lct_data.tid, 0x0183, -1,
				     val64, sizeof(val64)) < 0)
			printk(KERN_INFO "%s: Unable to query LAN_OPTIONAL_RX_HISTORICAL_STATS.\n", dev->name);
		else {
			dprintk(KERN_DEBUG "%s: LAN_OPTIONAL_RX_HISTORICAL_STATS queried.\n", dev->name);
			priv->stats.gen.multicast        = val64[4];
			priv->stats.gen.rx_length_errors = val64[10];
			priv->stats.gen.rx_crc_errors    = val64[0];
		}
	}

	if (i2o_dev->lct_data.sub_class == I2O_LAN_ETHERNET) {
		u64 supported_stats = 0;
		if (i2o_query_scalar(iop, i2o_dev->lct_data.tid, 0x0200, -1,
				     val64, sizeof(val64)) < 0)
			printk(KERN_INFO "%s: Unable to query LAN_802_3_HISTORICAL_STATS.\n", dev->name);
		else {
			dprintk(KERN_DEBUG "%s: LAN_802_3_HISTORICAL_STATS queried.\n", dev->name);
			priv->stats.gen.collisions        = val64[1] + val64[2];
			priv->stats.gen.rx_frame_errors   = val64[0];
			priv->stats.gen.tx_carrier_errors = val64[6];
		}

		if (i2o_query_scalar(iop, i2o_dev->lct_data.tid, 0x0280, -1,
				     &supported_stats, sizeof(supported_stats)) < 0)
			printk(KERN_INFO "%s: Unable to query LAN_SUPPORTED_802_3_HISTORICAL_STATS.\n", dev->name);

		if (supported_stats != 0) {
			if (i2o_query_scalar(iop, i2o_dev->lct_data.tid, 0x0281, -1,
					     val64, sizeof(val64)) < 0)
				printk(KERN_INFO "%s: Unable to query LAN_OPTIONAL_802_3_HISTORICAL_STATS.\n", dev->name);
			else {
				dprintk(KERN_DEBUG "%s: LAN_OPTIONAL_802_3_HISTORICAL_STATS queried.\n", dev->name);
				if (supported_stats & 0x1)
					priv->stats.gen.rx_over_errors = val64[0];
				if (supported_stats & 0x4)
					priv->stats.gen.tx_heartbeat_errors = val64[2];
			}
		}
	}

#ifdef CONFIG_TR
	if (i2o_dev->lct_data.sub_class == I2O_LAN_TR) {
		if (i2o_query_scalar(iop, i2o_dev->lct_data.tid, 0x0300, -1,
				     val64, sizeof(val64)) < 0)
			printk(KERN_INFO "%s: Unable to query LAN_802_5_HISTORICAL_STATS.\n", dev->name);
		else {
			struct tr_statistics *stats =
				(struct tr_statistics *)&priv->stats;
			dprintk(KERN_DEBUG "%s: LAN_802_5_HISTORICAL_STATS queried.\n", dev->name);

			stats->line_errors		= val64[0];
			stats->internal_errors		= val64[7];
			stats->burst_errors		= val64[4];
			stats->A_C_errors		= val64[2];
			stats->abort_delimiters		= val64[3];
			stats->lost_frames		= val64[1];
			/* stats->recv_congest_count	= ?;  FIXME ??*/
			stats->frame_copied_errors	= val64[5];
			stats->frequency_errors		= val64[6];
			stats->token_errors		= val64[9];
		}
		/* Token Ring optional stats not yet defined */
	}
#endif

#ifdef CONFIG_FDDI
	if (i2o_dev->lct_data.sub_class == I2O_LAN_FDDI) {
		if (i2o_query_scalar(iop, i2o_dev->lct_data.tid, 0x0400, -1,
				     val64, sizeof(val64)) < 0)
			printk(KERN_INFO "%s: Unable to query LAN_FDDI_HISTORICAL_STATS.\n", dev->name);
		else {
			dprintk(KERN_DEBUG "%s: LAN_FDDI_HISTORICAL_STATS queried.\n", dev->name);
			priv->stats.smt_cf_state = val64[0];
			memcpy(priv->stats.mac_upstream_nbr, &val64[1], FDDI_K_ALEN);
			memcpy(priv->stats.mac_downstream_nbr, &val64[2], FDDI_K_ALEN);
			priv->stats.mac_error_cts = val64[3];
			priv->stats.mac_lost_cts  = val64[4];
			priv->stats.mac_rmt_state = val64[5];
			memcpy(priv->stats.port_lct_fail_cts, &val64[6], 8);
			memcpy(priv->stats.port_lem_reject_cts, &val64[7], 8);
			memcpy(priv->stats.port_lem_cts, &val64[8], 8);
			memcpy(priv->stats.port_pcm_state, &val64[9], 8);
		}
		/* FDDI optional stats not yet defined */
	}
#endif

#ifdef CONFIG_NET_FC
	/* Fibre Channel Statistics not yet defined in 1.53 nor 2.0 */
#endif

	return (struct net_device_stats *)&priv->stats;
}

/*
 * i2o_lan_set_mc_filter(): Post a request to set the multicast filter.
 */
int i2o_lan_set_mc_filter(struct net_device *dev, u32 filter_mask)
{
	struct i2o_lan_local *priv = (struct i2o_lan_local *)dev->priv;
	struct i2o_device *i2o_dev = priv->i2o_dev;
	struct i2o_controller *iop = i2o_dev->controller;
	u32 msg[10];

	msg[0] = TEN_WORD_MSG_SIZE | SGL_OFFSET_5;
	msg[1] = I2O_CMD_UTIL_PARAMS_SET << 24 | HOST_TID << 12 | i2o_dev->lct_data.tid;
	msg[2] = priv->unit << 16 | lan_context;
	msg[3] = 0x0001 << 16 | 3;	// TransactionContext: group&field
	msg[4] = 0;
	msg[5] = 0xCC000000 | 16;			// Immediate data SGL
	msg[6] = 1;					// OperationCount
	msg[7] = 0x0001<<16 | I2O_PARAMS_FIELD_SET;	// Group, Operation
	msg[8] = 3 << 16 | 1;				// FieldIndex, FieldCount
	msg[9] = filter_mask;				// Value

	return i2o_post_this(iop, msg, sizeof(msg));
}

/*
 * i2o_lan_set_mc_table(): Post a request to set the LAN_MULTICAST_MAC_ADDRESS table.
 */
int i2o_lan_set_mc_table(struct net_device *dev)
{
	struct i2o_lan_local *priv = (struct i2o_lan_local *)dev->priv;
	struct i2o_device *i2o_dev = priv->i2o_dev;
	struct i2o_controller *iop = i2o_dev->controller;
	struct dev_mc_list *mc;
	u32 msg[10 + 2 * dev->mc_count];
	u8 *work8 = (u8 *)(msg + 10);
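	/* Each table row is a MAC address zero-padded to 8 bytes, i.e. two
	 * message words per address, appended after the 10-word header.
	 */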

	msg[0] = I2O_MESSAGE_SIZE(10 + 2 * dev->mc_count) | SGL_OFFSET_5;
	msg[1] = I2O_CMD_UTIL_PARAMS_SET << 24 | HOST_TID << 12 | i2o_dev->lct_data.tid;
	msg[2] = priv->unit << 16 | lan_context;	// InitiatorContext
	msg[3] = 0x0002 << 16 | (u16)-1;		// TransactionContext
	msg[4] = 0;					// OperationFlags
	msg[5] = 0xCC000000 | (16 + 8 * dev->mc_count);	// Immediate data SGL
	msg[6] = 2;					// OperationCount
	msg[7] = 0x0002 << 16 | I2O_PARAMS_TABLE_CLEAR;	// Group, Operation
	msg[8] = 0x0002 << 16 | I2O_PARAMS_ROW_ADD;	// Group, Operation
	msg[9] = dev->mc_count << 16 | (u16)-1;		// RowCount, FieldCount

	for (mc = dev->mc_list; mc ; mc = mc->next, work8 += 8) {
		memset(work8, 0, 8);
		memcpy(work8, mc->dmi_addr, mc->dmi_addrlen); // Values
	}

	return i2o_post_this(iop, msg, sizeof(msg));
}

/*
 * i2o_lan_set_multicast_list(): Enable a network device to receive packets
 *      not sent to the protocol address.
 */
static void i2o_lan_set_multicast_list(struct net_device *dev)
{
	struct i2o_lan_local *priv = (struct i2o_lan_local *)dev->priv;
	u32 filter_mask;

	if (dev->flags & IFF_PROMISC) {
		filter_mask = 0x00000002;
		dprintk(KERN_INFO "%s: Enabling promiscuous mode...\n", dev->name);
	} else if ((dev->flags & IFF_ALLMULTI) || dev->mc_count > priv->max_size_mc_table) {
		filter_mask = 0x00000004;
		dprintk(KERN_INFO "%s: Enabling all multicast mode...\n", dev->name);
	} else if (dev->mc_count) {
		filter_mask = 0x00000000;
		dprintk(KERN_INFO "%s: Enabling multicast mode...\n", dev->name);
		if (i2o_lan_set_mc_table(dev) < 0)
			printk(KERN_WARNING "%s: Unable to send MAC table.\n", dev->name);
	} else {
		filter_mask = 0x00000300; // Broadcast, Multicast disabled
		dprintk(KERN_INFO "%s: Enabling unicast mode...\n", dev->name);
	}

	/* Finally copy new FilterMask to DDM */

	if (i2o_lan_set_mc_filter(dev, filter_mask) < 0)
		printk(KERN_WARNING "%s: Unable to send MAC FilterMask.\n", dev->name);
}

/*
 * i2o_lan_change_mtu(): Change maximum transfer unit size.
 */
static int i2o_lan_change_mtu(struct net_device *dev, int new_mtu)
{
	struct i2o_lan_local *priv = (struct i2o_lan_local *)dev->priv;
	struct i2o_device *i2o_dev = priv->i2o_dev;
	u32 max_pkt_size;

	if (i2o_query_scalar(i2o_dev->controller, i2o_dev->lct_data.tid,
			     0x0000, 6, &max_pkt_size, 4) < 0)
		return -EFAULT;

	if (new_mtu < 68 || new_mtu > 9000 || new_mtu > max_pkt_size)
		return -EINVAL;

	dev->mtu = new_mtu;

	i2o_lan_suspend(dev);		// to SUSPENDED state, return buckets

	while (priv->i2o_fbl_tail >= 0)	// free buffered buckets
		dev_kfree_skb(priv->i2o_fbl[priv->i2o_fbl_tail--]);

	i2o_lan_reset(dev);		// to OPERATIONAL state
	i2o_set_ddm_parameters(dev);	// reset some parameters
	i2o_lan_receive_post(dev);	// post new buckets (new size)

	return 0;
}

/* Functions to initialize I2O LAN OSM:
======================================*/

/*
 * i2o_lan_register_device(): Register LAN class device to kernel.
 */
struct net_device *i2o_lan_register_device(struct i2o_device *i2o_dev)
{
	struct net_device *dev = NULL;
	struct i2o_lan_local *priv = NULL;
	u8 hw_addr[8];
	u32 tx_max_out = 0;
	unsigned short (*type_trans)(struct sk_buff *, struct net_device *);
	void (*unregister_dev)(struct net_device *dev);

	switch (i2o_dev->lct_data.sub_class) {
	case I2O_LAN_ETHERNET:
		dev = init_etherdev(NULL, sizeof(struct i2o_lan_local));
		if (dev == NULL)
			return NULL;
		type_trans = eth_type_trans;
		unregister_dev = unregister_netdev;
		break;

#ifdef CONFIG_ANYLAN
	case I2O_LAN_100VG:
		printk(KERN_ERR "i2o_lan: 100Base-VG not yet supported.\n");
		return NULL;
		break;
#endif

#ifdef CONFIG_TR
	case I2O_LAN_TR:
		dev = init_trdev(NULL, sizeof(struct i2o_lan_local));
		if (dev == NULL)
			return NULL;
		type_trans = tr_type_trans;
		unregister_dev = unregister_trdev;
		break;
#endif

#ifdef CONFIG_FDDI
	case I2O_LAN_FDDI:
	{
		int size = sizeof(struct net_device) + sizeof(struct i2o_lan_local);

		dev = (struct net_device *) kmalloc(size, GFP_KERNEL);
		if (dev == NULL)
			return NULL;
		memset((char *)dev, 0, size);
		dev->priv = (void *)(dev + 1);

		if (dev_alloc_name(dev, "fddi%d") < 0) {
			printk(KERN_WARNING "i2o_lan: Too many FDDI devices.\n");
			kfree(dev);
			return NULL;
		}
		type_trans = fddi_type_trans;
		unregister_dev = (void *)unregister_netdevice;

		fddi_setup(dev);
		register_netdev(dev);
	}
	break;
#endif

#ifdef CONFIG_NET_FC
	case I2O_LAN_FIBRE_CHANNEL:
		dev = init_fcdev(NULL, sizeof(struct i2o_lan_local));
		if (dev == NULL)
			return NULL;
		type_trans = NULL;
/* FIXME: Move fc_type_trans() from drivers/net/fc/iph5526.c to net/802/fc.c
 * and export it in include/linux/fcdevice.h
 *		type_trans = fc_type_trans;
 */
		unregister_dev = (void *)unregister_fcdev;
		break;
#endif

	case I2O_LAN_UNKNOWN:
	default:
		printk(KERN_ERR "i2o_lan: LAN type 0x%04x not supported.\n",
		       i2o_dev->lct_data.sub_class);
		return NULL;
	}

	priv = (struct i2o_lan_local *)dev->priv;
	priv->i2o_dev = i2o_dev;
	priv->type_trans = type_trans;
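	/* A 4-word message header leaves room in each inbound frame for
	 * sgl_max three-word SG elements (one per packet or bucket).
	 */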
	priv->sgl_max = (i2o_dev->controller->status_block->inbound_frame_size - 4) / 3;
	atomic_set(&priv->buckets_out, 0);

	/* Set default values for user configurable parameters */
	/* Private values are changed via /proc file system */

	priv->max_buckets_out = max_buckets_out;
	priv->bucket_thresh   = bucket_thresh;
	priv->rx_copybreak    = rx_copybreak;
	priv->tx_batch_mode   = tx_batch_mode & 0x03;
	priv->i2o_event_mask  = i2o_event_mask;

	priv->tx_lock	      = SPIN_LOCK_UNLOCKED;
	priv->fbl_lock	      = SPIN_LOCK_UNLOCKED;

	unit++;
	i2o_landevs[unit] = dev;
	priv->unit = unit;

	if (i2o_query_scalar(i2o_dev->controller, i2o_dev->lct_data.tid,
			     0x0001, 0, &hw_addr, sizeof(hw_addr)) < 0) {
		printk(KERN_ERR "%s: Unable to query hardware address.\n", dev->name);
		unit--;
		unregister_dev(dev);
		kfree(dev);
		return NULL;
	}
	dprintk(KERN_DEBUG "%s: hwaddr = %02X:%02X:%02X:%02X:%02X:%02X\n",
		dev->name, hw_addr[0], hw_addr[1], hw_addr[2], hw_addr[3],
		hw_addr[4], hw_addr[5]);

	dev->addr_len = 6;
	memcpy(dev->dev_addr, hw_addr, 6);

	if (i2o_query_scalar(i2o_dev->controller, i2o_dev->lct_data.tid,
			     0x0007, 2, &tx_max_out, sizeof(tx_max_out)) < 0) {
		printk(KERN_ERR "%s: Unable to query max TX queue.\n", dev->name);
		unit--;
		unregister_dev(dev);
		kfree(dev);
		return NULL;
	}
	dprintk(KERN_INFO "%s: Max TX Outstanding = %d.\n", dev->name, tx_max_out);
	priv->tx_max_out = tx_max_out;
	atomic_set(&priv->tx_out, 0);
	priv->tx_count = 0;

	INIT_LIST_HEAD(&priv->i2o_batch_send_task.list);
	priv->i2o_batch_send_task.sync    = 0;
	priv->i2o_batch_send_task.routine = (void *)i2o_lan_batch_send;
	priv->i2o_batch_send_task.data    = (void *)dev;

	dev->open		= i2o_lan_open;
	dev->stop		= i2o_lan_close;
	dev->get_stats		= i2o_lan_get_stats;
	dev->set_multicast_list = i2o_lan_set_multicast_list;
	dev->tx_timeout		= i2o_lan_tx_timeout;
	dev->watchdog_timeo	= I2O_LAN_TX_TIMEOUT;

#ifdef CONFIG_NET_FC
	if (i2o_dev->lct_data.sub_class == I2O_LAN_FIBRE_CHANNEL)
		dev->hard_start_xmit = i2o_lan_sdu_send;
	else
#endif
		dev->hard_start_xmit = i2o_lan_packet_send;

	if (i2o_dev->lct_data.sub_class == I2O_LAN_ETHERNET)
		dev->change_mtu	= i2o_lan_change_mtu;

	return dev;
}

static int __init i2o_lan_init(void)
{
	struct net_device *dev;
	int i;

	printk(KERN_INFO "I2O LAN OSM (C) 1999 University of Helsinki.\n");

	/* Module params are used as global defaults for private values */

	if (max_buckets_out > I2O_LAN_MAX_BUCKETS_OUT)
		max_buckets_out = I2O_LAN_MAX_BUCKETS_OUT;
	if (bucket_thresh > max_buckets_out)
		bucket_thresh = max_buckets_out;

	/* Install handlers for incoming replies */

	if (i2o_install_handler(&i2o_lan_send_handler) < 0) {
		printk(KERN_ERR "i2o_lan: Unable to register I2O LAN OSM.\n");
		return -EINVAL;
	}
	lan_send_context = i2o_lan_send_handler.context;

	if (i2o_install_handler(&i2o_lan_receive_handler) < 0) {
		printk(KERN_ERR "i2o_lan: Unable to register I2O LAN OSM.\n");
		return -EINVAL;
	}
	lan_receive_context = i2o_lan_receive_handler.context;

	if (i2o_install_handler(&i2o_lan_handler) < 0) {
		printk(KERN_ERR "i2o_lan: Unable to register I2O LAN OSM.\n");
		return -EINVAL;
	}
	lan_context = i2o_lan_handler.context;

	for (i=0; i <= MAX_LAN_CARDS; i++)
		i2o_landevs[i] = NULL;

	for (i=0; i < MAX_I2O_CONTROLLERS; i++) {
		struct i2o_controller *iop = i2o_find_controller(i);
		struct i2o_device *i2o_dev;

		if (iop == NULL)
			continue;

		for (i2o_dev = iop->devices; i2o_dev != NULL; i2o_dev = i2o_dev->next) {

			if (i2o_dev->lct_data.class_id != I2O_CLASS_LAN)
				continue;

			/* Make sure device not already claimed by an ISM */
			if (i2o_dev->lct_data.user_tid != 0xFFF)
				continue;

			if (unit == MAX_LAN_CARDS) {
				i2o_unlock_controller(iop);
				printk(KERN_WARNING "i2o_lan: Too many I2O LAN devices.\n");
				return -EINVAL;
			}

			dev = i2o_lan_register_device(i2o_dev);
			if (dev == NULL) {
				printk(KERN_ERR "i2o_lan: Unable to register I2O LAN device 0x%04x.\n",
				       i2o_dev->lct_data.sub_class);
				continue;
			}

			printk(KERN_INFO "%s: I2O LAN device registered, "
				"subclass = 0x%04x, unit = %d, tid = %d.\n",
				dev->name, i2o_dev->lct_data.sub_class,
				((struct i2o_lan_local *)dev->priv)->unit,
				i2o_dev->lct_data.tid);
		}

		i2o_unlock_controller(iop);
	}

	dprintk(KERN_INFO "%d I2O LAN devices found and registered.\n", unit+1);

	return 0;
}

static void i2o_lan_exit(void)
{
	int i;

	for (i = 0; i <= unit; i++) {
		struct net_device *dev = i2o_landevs[i];
		struct i2o_lan_local *priv = (struct i2o_lan_local *)dev->priv;
		struct i2o_device *i2o_dev = priv->i2o_dev;

		switch (i2o_dev->lct_data.sub_class) {
		case I2O_LAN_ETHERNET:
			unregister_netdev(dev);
			break;
#ifdef CONFIG_FDDI
		case I2O_LAN_FDDI:
			unregister_netdevice(dev);
			break;
#endif
#ifdef CONFIG_TR
		case I2O_LAN_TR:
			unregister_trdev(dev);
			break;
#endif
#ifdef CONFIG_NET_FC
		case I2O_LAN_FIBRE_CHANNEL:
			unregister_fcdev(dev);
			break;
#endif
		default:
			printk(KERN_WARNING "%s: Spurious I2O LAN subclass 0x%08x.\n",
			       dev->name, i2o_dev->lct_data.sub_class);
		}

		dprintk(KERN_INFO "%s: I2O LAN device unregistered.\n",
			dev->name);
		kfree(dev);
	}

	i2o_remove_handler(&i2o_lan_handler);
	i2o_remove_handler(&i2o_lan_send_handler);
	i2o_remove_handler(&i2o_lan_receive_handler);
}

EXPORT_NO_SYMBOLS;

MODULE_AUTHOR("University of Helsinki, Department of Computer Science");
MODULE_DESCRIPTION("I2O LAN OSM");
MODULE_LICENSE("GPL");


MODULE_PARM(max_buckets_out, "1-" __MODULE_STRING(I2O_LAN_MAX_BUCKETS_OUT) "i");
MODULE_PARM_DESC(max_buckets_out, "Total number of buckets to post (1-)");
MODULE_PARM(bucket_thresh, "1-" __MODULE_STRING(I2O_LAN_MAX_BUCKETS_OUT) "i");
MODULE_PARM_DESC(bucket_thresh, "Bucket post threshold (1-)");
MODULE_PARM(rx_copybreak, "1-" "i");
MODULE_PARM_DESC(rx_copybreak, "Copy breakpoint: only frames below this size are copied (1-)");
MODULE_PARM(tx_batch_mode, "0-2" "i");
MODULE_PARM_DESC(tx_batch_mode, "0=Send immediately, 1=Send in batches, 2=Switch automatically");

module_init(i2o_lan_init);
module_exit(i2o_lan_exit);