return ret;
}
+unsigned int dfu_polltimeout_nand(struct dfu_entity *dfu)
+{
+ /*
+ * Currently, a non-zero Poll Timeout is only needed for NAND
+ * UBI partitions, as their unused sectors still need to be erased.
+ */
+ if (dfu->data.nand.ubi)
+ return DFU_MANIFEST_POLL_TIMEOUT;
+
+ return DFU_DEFAULT_POLL_TIMEOUT;
+}
+
int dfu_fill_entity_nand(struct dfu_entity *dfu, char *s)
{
char *st;
dfu->read_medium = dfu_read_medium_nand;
dfu->write_medium = dfu_write_medium_nand;
dfu->flush_medium = dfu_flush_medium_nand;
+ dfu->poll_timeout = dfu_polltimeout_nand;
/* initial state */
dfu->inited = 0;
req->length, f_dfu->blk_seq_num);
}
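+/*
+ * Return the medium-specific manifestation poll timeout if the entity
+ * registers one; otherwise fall back to DFU_MANIFEST_POLL_TIMEOUT.
+ */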
+static inline int dfu_get_manifest_timeout(struct dfu_entity *dfu)
+{
+ return dfu->poll_timeout ? dfu->poll_timeout(dfu) :
+ DFU_MANIFEST_POLL_TIMEOUT;
+}
+
static void handle_getstatus(struct usb_request *req)
{
struct dfu_status *dstat = (struct dfu_status *)req->buf;
struct f_dfu *f_dfu = req->context;
+ struct dfu_entity *dfu = dfu_get_entity(f_dfu->altsetting);
dfu_set_poll_timeout(dstat, 0);
f_dfu->dfu_state = DFU_STATE_dfuMANIFEST;
break;
case DFU_STATE_dfuMANIFEST:
- dfu_set_poll_timeout(dstat, DFU_MANIFEST_POLL_TIMEOUT);
+ dfu_set_poll_timeout(dstat, dfu_get_manifest_timeout(dfu));
+ break;
default:
break;
}
u64 offset, void *buf, long *len);
int (*flush_medium)(struct dfu_entity *dfu);
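+ /* optional: medium-specific Poll Timeout for the manifestation phase */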
+ unsigned int (*poll_timeout)(struct dfu_entity *dfu);
struct list_head list;